// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/cpu.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/efi.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irqchip/arm-gic-v4.h>

#include <asm/cputype.h>
#include <asm/exception.h>

#include "irq-gic-common.h"

#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING		(1ULL << 0)
#define ITS_FLAGS_WORKAROUND_CAVIUM_22375	(1ULL << 1)
#define ITS_FLAGS_WORKAROUND_CAVIUM_23144	(1ULL << 2)
#define ITS_FLAGS_SAVE_SUSPEND_STATE		(1ULL << 3)

#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING	(1 << 0)
#define RDIST_FLAGS_RD_TABLES_PREALLOCATED	(1 << 1)

static u32 lpi_id_bits;

/*
 * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
 * deal with (one configuration byte per interrupt). PENDBASE has to
 * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
 */
#define LPI_NRBITS		lpi_id_bits
#define LPI_PROPBASE_SZ		ALIGN(BIT(LPI_NRBITS), SZ_64K)
#define LPI_PENDBASE_SZ		ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)

#define LPI_PROP_DEFAULT_PRIO	GICD_INT_DEF_PRI

/*
 * Collection structure - just an ID, and a redistributor address to
 * ping. We use one per CPU as a bag of interrupts assigned to this
 * CPU.
 */
struct its_collection {
	u64			target_address;
	u16			col_id;
};

/*
 * The ITS_BASER structure - contains memory information, cached
 * value of BASER register configuration and ITS page size.
 */
struct its_baser {
	void		*base;
	u64		val;
	u32		order;
	u32		psz;
};

struct its_device;

/*
 * The ITS structure - contains most of the infrastructure, with the
 * top-level MSI domain, the command queue, the collections, and the
 * list of devices writing to it.
 *
 * dev_alloc_lock has to be taken for device allocations, while the
 * spinlock must be taken to parse data structures such as the device
 * list.
 */
struct its_node {
	raw_spinlock_t		lock;
	struct mutex		dev_alloc_lock;
	struct list_head	entry;
	void __iomem		*base;
	phys_addr_t		phys_base;
	struct its_cmd_block	*cmd_base;
	struct its_cmd_block	*cmd_write;
	struct its_baser	tables[GITS_BASER_NR_REGS];
	struct its_collection	*collections;
	struct fwnode_handle	*fwnode_handle;
	u64			(*get_msi_base)(struct its_device *its_dev);
	u64			typer;
	u64			cbaser_save;
	u32			ctlr_save;
	struct list_head	its_device_list;
	u64			flags;
	unsigned long		list_nr;
	int			numa_node;
	unsigned int		msi_domain_flags;
	u32			pre_its_base;	/* for Socionext Synquacer */
	int			vlpi_redist_offset;
};

#define is_v4(its)		(!!((its)->typer & GITS_TYPER_VLPIS))
#define device_ids(its)		(FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1)

#define ITS_ITT_ALIGN		SZ_256

/* The maximum number of VPEID bits supported by VLPI commands */
#define ITS_MAX_VPEID_BITS	(16)
#define ITS_MAX_VPEID		(1 << (ITS_MAX_VPEID_BITS))

/* Convert page order to size in bytes */
#define PAGE_ORDER_TO_SIZE(o)	(PAGE_SIZE << (o))

struct event_lpi_map {
	unsigned long		*lpi_map;
	u16			*col_map;
	irq_hw_number_t		lpi_base;
	int			nr_lpis;
	struct mutex		vlpi_lock;
	struct its_vm		*vm;
	struct its_vlpi_map	*vlpi_maps;
	int			nr_vlpis;
};

/*
 * The ITS view of a device - belongs to an ITS, owns an interrupt
 * translation table, and a list of interrupts. If some of its
 * LPIs are injected into a guest (GICv4), the event_map.vm field
 * indicates which one.
 */
struct its_device {
	struct list_head	entry;
	struct its_node		*its;
	struct event_lpi_map	event_map;
	void			*itt;
	u32			nr_ites;
	u32			device_id;
	bool			shared;
};

static struct {
	raw_spinlock_t		lock;
	struct its_device	*dev;
	struct its_vpe		**vpes;
	int			next_victim;
} vpe_proxy;

static LIST_HEAD(its_nodes);
static DEFINE_RAW_SPINLOCK(its_lock);
static struct rdists *gic_rdists;
static struct irq_domain *its_parent;

static unsigned long its_list_map;
static u16 vmovp_seq_num;
static DEFINE_RAW_SPINLOCK(vmovp_lock);

static DEFINE_IDA(its_vpeid_ida);

#define gic_data_rdist()		(raw_cpu_ptr(gic_rdists->rdist))
#define gic_data_rdist_cpu(cpu)		(per_cpu_ptr(gic_rdists->rdist, cpu))
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
#define gic_data_rdist_vlpi_base()	(gic_data_rdist_rd_base() + SZ_128K)

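/* Bitmap (by list_nr) of the ITSs that currently have VLPIs mapped for @vm */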
static u16 get_its_list(struct its_vm *vm)
{
	struct its_node *its;
	unsigned long its_list = 0;

	list_for_each_entry(its, &its_nodes, entry) {
		if (!is_v4(its))
			continue;

		if (vm->vlpi_count[its->list_nr])
			__set_bit(its->list_nr, &its_list);
	}

	return (u16)its_list;
}

static inline u32 its_get_event_id(struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	return d->hwirq - its_dev->event_map.lpi_base;
}

static struct its_collection *dev_event_to_col(struct its_device *its_dev,
					       u32 event)
{
	struct its_node *its = its_dev->its;

	return its->collections + its_dev->event_map.col_map[event];
}

static struct its_collection *irq_to_col(struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);

	return dev_event_to_col(its_dev, its_get_event_id(d));
}

static struct its_collection *valid_col(struct its_collection *col)
{
	if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
		return NULL;

	return col;
}

static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
{
	if (valid_col(its->collections + vpe->col_idx))
		return vpe;

	return NULL;
}

/*
 * ITS command descriptors - parameters to be encoded in a command
 * block.
 */
struct its_cmd_desc {
	union {
		struct {
			struct its_device *dev;
			u32 event_id;
		} its_inv_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_clear_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_int_cmd;

		struct {
			struct its_device *dev;
			int valid;
		} its_mapd_cmd;

		struct {
			struct its_collection *col;
			int valid;
		} its_mapc_cmd;

		struct {
			struct its_device *dev;
			u32 phys_id;
			u32 event_id;
		} its_mapti_cmd;

		struct {
			struct its_device *dev;
			struct its_collection *col;
			u32 event_id;
		} its_movi_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_discard_cmd;

		struct {
			struct its_collection *col;
		} its_invall_cmd;

		struct {
			struct its_vpe *vpe;
		} its_vinvall_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_collection *col;
			bool valid;
		} its_vmapp_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_device *dev;
			u32 virt_id;
			u32 event_id;
			bool db_enabled;
		} its_vmapti_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_device *dev;
			u32 event_id;
			bool db_enabled;
		} its_vmovi_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_collection *col;
			u16 seq_num;
			u16 its_list;
		} its_vmovp_cmd;
	};
};

/*
 * The ITS command block, which is what the ITS actually parses.
 */
struct its_cmd_block {
	union {
		u64	raw_cmd[4];
		__le64	raw_cmd_le[4];
	};
};

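/* The command queue is a single 64kB block of 32-byte commands (2048 slots) */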
#define ITS_CMD_QUEUE_SZ		SZ_64K
#define ITS_CMD_QUEUE_NR_ENTRIES	(ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))

typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *,
						    struct its_cmd_block *,
						    struct its_cmd_desc *);

typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *,
					      struct its_cmd_block *,
					      struct its_cmd_desc *);

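/* Write @val into bits [h:l] of a 64-bit command word, clearing the field first */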
static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
{
	u64 mask = GENMASK_ULL(h, l);
	*raw_cmd &= ~mask;
	*raw_cmd |= (val << l) & mask;
}

static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
{
	its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);
}

static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
{
	its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
}

static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
{
	its_mask_encode(&cmd->raw_cmd[1], id, 31, 0);
}

static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
{
	its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32);
}

static void its_encode_size(struct its_cmd_block *cmd, u8 size)
{
	its_mask_encode(&cmd->raw_cmd[1], size, 4, 0);
}

static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
{
	its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8);
}

static void its_encode_valid(struct its_cmd_block *cmd, int valid)
{
	its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63);
}

static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
{
	its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16);
}

static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
{
	its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
}

static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
{
	its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
}

static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
{
	its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
}

static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
{
	its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
}

static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
{
	its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
}

static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num)
{
	its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32);
}

static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
{
	its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0);
}

static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
{
	its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16);
}

static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
{
	its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
}

static inline void its_fixup_cmd(struct its_cmd_block *cmd)
{
	/* Let's fixup BE commands */
	cmd->raw_cmd_le[0] = cpu_to_le64(cmd->raw_cmd[0]);
	cmd->raw_cmd_le[1] = cpu_to_le64(cmd->raw_cmd[1]);
	cmd->raw_cmd_le[2] = cpu_to_le64(cmd->raw_cmd[2]);
	cmd->raw_cmd_le[3] = cpu_to_le64(cmd->raw_cmd[3]);
}

static struct its_collection *its_build_mapd_cmd(struct its_node *its,
						 struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	unsigned long itt_addr;
	u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);

	itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
	itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);

	its_encode_cmd(cmd, GITS_CMD_MAPD);
	its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
	its_encode_size(cmd, size - 1);
	its_encode_itt(cmd, itt_addr);
	its_encode_valid(cmd, desc->its_mapd_cmd.valid);

	its_fixup_cmd(cmd);

	return NULL;
}

static struct its_collection *its_build_mapc_cmd(struct its_node *its,
						 struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_MAPC);
	its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
	its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
	its_encode_valid(cmd, desc->its_mapc_cmd.valid);

	its_fixup_cmd(cmd);

	return desc->its_mapc_cmd.col;
}

static struct its_collection *its_build_mapti_cmd(struct its_node *its,
						  struct its_cmd_block *cmd,
						  struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_mapti_cmd.dev,
			       desc->its_mapti_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_MAPTI);
	its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_mapti_cmd.event_id);
	its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id);
	its_encode_collection(cmd, col->col_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_movi_cmd(struct its_node *its,
						 struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_movi_cmd.dev,
			       desc->its_movi_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_MOVI);
	its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
	its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_discard_cmd(struct its_node *its,
						    struct its_cmd_block *cmd,
						    struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_discard_cmd.dev,
			       desc->its_discard_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_DISCARD);
	its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_discard_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_inv_cmd(struct its_node *its,
						struct its_cmd_block *cmd,
						struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_inv_cmd.dev,
			       desc->its_inv_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INV);
	its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_inv_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_int_cmd(struct its_node *its,
						struct its_cmd_block *cmd,
						struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_int_cmd.dev,
			       desc->its_int_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INT);
	its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_int_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_clear_cmd(struct its_node *its,
						  struct its_cmd_block *cmd,
						  struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_clear_cmd.dev,
			       desc->its_clear_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_CLEAR);
	its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_clear_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_invall_cmd(struct its_node *its,
						   struct its_cmd_block *cmd,
						   struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_INVALL);
	its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);

	its_fixup_cmd(cmd);

	return NULL;
}

static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
					     struct its_cmd_block *cmd,
					     struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_VINVALL);
	its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vinvall_cmd.vpe);
}

static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	unsigned long vpt_addr;
	u64 target;

	vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
	target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;

	its_encode_cmd(cmd, GITS_CMD_VMAPP);
	its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
	its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
	its_encode_target(cmd, target);
	its_encode_vpt_addr(cmd, vpt_addr);
	its_encode_vpt_size(cmd, LPI_NRBITS - 1);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmapp_cmd.vpe);
}

static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
					    struct its_cmd_block *cmd,
					    struct its_cmd_desc *desc)
{
	u32 db;

	if (desc->its_vmapti_cmd.db_enabled)
		db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
	else
		db = 1023;

	its_encode_cmd(cmd, GITS_CMD_VMAPTI);
	its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
	its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
	its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
	its_encode_db_phys_id(cmd, db);
	its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmapti_cmd.vpe);
}

static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	u32 db;

	if (desc->its_vmovi_cmd.db_enabled)
		db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
	else
		db = 1023;

	its_encode_cmd(cmd, GITS_CMD_VMOVI);
	its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
	its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
	its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
	its_encode_db_phys_id(cmd, db);
	its_encode_db_valid(cmd, true);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmovi_cmd.vpe);
}

static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	u64 target;

	target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset;
	its_encode_cmd(cmd, GITS_CMD_VMOVP);
	its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
	its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
	its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
	its_encode_target(cmd, target);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmovp_cmd.vpe);
}

static u64 its_cmd_ptr_to_offset(struct its_node *its,
				 struct its_cmd_block *ptr)
{
	return (ptr - its->cmd_base) * sizeof(*ptr);
}

static int its_queue_full(struct its_node *its)
{
	int widx;
	int ridx;

	widx = its->cmd_write - its->cmd_base;
	ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);

	/* This is incredibly unlikely to happen, unless the ITS locks up. */
	if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
		return 1;

	return 0;
}

static struct its_cmd_block *its_allocate_entry(struct its_node *its)
{
	struct its_cmd_block *cmd;
	u32 count = 1000000;	/* 1s! */

	while (its_queue_full(its)) {
		count--;
		if (!count) {
			pr_err_ratelimited("ITS queue not draining\n");
			return NULL;
		}
		cpu_relax();
		udelay(1);
	}

	cmd = its->cmd_write++;

	/* Handle queue wrapping */
	if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
		its->cmd_write = its->cmd_base;

	/* Clear command */
	cmd->raw_cmd[0] = 0;
	cmd->raw_cmd[1] = 0;
	cmd->raw_cmd[2] = 0;
	cmd->raw_cmd[3] = 0;

	return cmd;
}

static struct its_cmd_block *its_post_commands(struct its_node *its)
{
	u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);

	writel_relaxed(wr, its->base + GITS_CWRITER);

	return its->cmd_write;
}

static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
{
	/*
	 * Make sure the commands written to memory are observable by
	 * the ITS.
	 */
	if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
		gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
	else
		dsb(ishst);
}

static int its_wait_for_range_completion(struct its_node *its,
					 u64 prev_idx,
					 struct its_cmd_block *to)
{
	u64 rd_idx, to_idx, linear_idx;
	u32 count = 1000000;	/* 1s! */

	/* Linearize to_idx if the command set has wrapped around */
	to_idx = its_cmd_ptr_to_offset(its, to);
	if (to_idx < prev_idx)
		to_idx += ITS_CMD_QUEUE_SZ;

	linear_idx = prev_idx;

	while (1) {
		s64 delta;

		rd_idx = readl_relaxed(its->base + GITS_CREADR);

		/*
		 * Compute the read pointer progress, taking the
		 * potential wrap-around into account.
		 */
		delta = rd_idx - prev_idx;
		if (rd_idx < prev_idx)
			delta += ITS_CMD_QUEUE_SZ;

		linear_idx += delta;
		if (linear_idx >= to_idx)
			break;

		count--;
		if (!count) {
			pr_err_ratelimited("ITS queue timeout (%llu %llu)\n",
					   to_idx, linear_idx);
			return -1;
		}
		prev_idx = rd_idx;
		cpu_relax();
		udelay(1);
	}

	return 0;
}

/* Warning, macro hell follows */
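/*
 * BUILD_SINGLE_CMD_FUNC() expands to a function that takes the command
 * queue lock, allocates and builds one command (plus a SYNC/VSYNC when
 * the builder returns a target), posts it, and waits for the read
 * pointer to move past it.
 */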
#define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn)	\
void name(struct its_node *its,						\
	  buildtype builder,						\
	  struct its_cmd_desc *desc)					\
{									\
	struct its_cmd_block *cmd, *sync_cmd, *next_cmd;		\
	synctype *sync_obj;						\
	unsigned long flags;						\
	u64 rd_idx;							\
									\
	raw_spin_lock_irqsave(&its->lock, flags);			\
									\
	cmd = its_allocate_entry(its);					\
	if (!cmd) {		/* We're soooooo screwed... */		\
		raw_spin_unlock_irqrestore(&its->lock, flags);		\
		return;							\
	}								\
	sync_obj = builder(its, cmd, desc);				\
	its_flush_cmd(its, cmd);					\
									\
	if (sync_obj) {							\
		sync_cmd = its_allocate_entry(its);			\
		if (!sync_cmd)						\
			goto post;					\
									\
		buildfn(its, sync_cmd, sync_obj);			\
		its_flush_cmd(its, sync_cmd);				\
	}								\
									\
post:									\
	rd_idx = readl_relaxed(its->base + GITS_CREADR);		\
	next_cmd = its_post_commands(its);				\
	raw_spin_unlock_irqrestore(&its->lock, flags);			\
									\
	if (its_wait_for_range_completion(its, rd_idx, next_cmd))	\
		pr_err_ratelimited("ITS cmd %ps failed\n", builder);	\
}

static void its_build_sync_cmd(struct its_node *its,
			       struct its_cmd_block *sync_cmd,
			       struct its_collection *sync_col)
{
	its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
	its_encode_target(sync_cmd, sync_col->target_address);

	its_fixup_cmd(sync_cmd);
}

static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
			     struct its_collection, its_build_sync_cmd)

static void its_build_vsync_cmd(struct its_node *its,
				struct its_cmd_block *sync_cmd,
				struct its_vpe *sync_vpe)
{
	its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
	its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);

	its_fixup_cmd(sync_cmd);
}

static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
			     struct its_vpe, its_build_vsync_cmd)

static void its_send_int(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_int_cmd.dev = dev;
	desc.its_int_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_int_cmd, &desc);
}

static void its_send_clear(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_clear_cmd.dev = dev;
	desc.its_clear_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_clear_cmd, &desc);
}

static void its_send_inv(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_inv_cmd.dev = dev;
	desc.its_inv_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_inv_cmd, &desc);
}

static void its_send_mapd(struct its_device *dev, int valid)
{
	struct its_cmd_desc desc;

	desc.its_mapd_cmd.dev = dev;
	desc.its_mapd_cmd.valid = !!valid;

	its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
}

static void its_send_mapc(struct its_node *its, struct its_collection *col,
			  int valid)
{
	struct its_cmd_desc desc;

	desc.its_mapc_cmd.col = col;
	desc.its_mapc_cmd.valid = !!valid;

	its_send_single_command(its, its_build_mapc_cmd, &desc);
}

static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_mapti_cmd.dev = dev;
	desc.its_mapti_cmd.phys_id = irq_id;
	desc.its_mapti_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
}

static void its_send_movi(struct its_device *dev,
			  struct its_collection *col, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_movi_cmd.dev = dev;
	desc.its_movi_cmd.col = col;
	desc.its_movi_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_movi_cmd, &desc);
}

static void its_send_discard(struct its_device *dev, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_discard_cmd.dev = dev;
	desc.its_discard_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_discard_cmd, &desc);
}

static void its_send_invall(struct its_node *its, struct its_collection *col)
{
	struct its_cmd_desc desc;

	desc.its_invall_cmd.col = col;

	its_send_single_command(its, its_build_invall_cmd, &desc);
}

static void its_send_vmapti(struct its_device *dev, u32 id)
{
	struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
	struct its_cmd_desc desc;

	desc.its_vmapti_cmd.vpe = map->vpe;
	desc.its_vmapti_cmd.dev = dev;
	desc.its_vmapti_cmd.virt_id = map->vintid;
	desc.its_vmapti_cmd.event_id = id;
	desc.its_vmapti_cmd.db_enabled = map->db_enabled;

	its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
}

static void its_send_vmovi(struct its_device *dev, u32 id)
{
	struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
	struct its_cmd_desc desc;

	desc.its_vmovi_cmd.vpe = map->vpe;
	desc.its_vmovi_cmd.dev = dev;
	desc.its_vmovi_cmd.event_id = id;
	desc.its_vmovi_cmd.db_enabled = map->db_enabled;

	its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
}

static void its_send_vmapp(struct its_node *its,
			   struct its_vpe *vpe, bool valid)
{
	struct its_cmd_desc desc;

	desc.its_vmapp_cmd.vpe = vpe;
	desc.its_vmapp_cmd.valid = valid;
	desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];

	its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
}

static void its_send_vmovp(struct its_vpe *vpe)
{
	struct its_cmd_desc desc = {};
	struct its_node *its;
	unsigned long flags;
	int col_id = vpe->col_idx;

	desc.its_vmovp_cmd.vpe = vpe;

	if (!its_list_map) {
		its = list_first_entry(&its_nodes, struct its_node, entry);
		desc.its_vmovp_cmd.col = &its->collections[col_id];
		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
		return;
	}

	/*
	 * Yet another marvel of the architecture. If using the
	 * its_list "feature", we need to make sure that all ITSs
	 * receive all VMOVP commands in the same order. The only way
	 * to guarantee this is to make vmovp a serialization point.
	 *
	 * Wall <-- Head.
	 */
	raw_spin_lock_irqsave(&vmovp_lock, flags);

	desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
	desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm);

	/* Emit VMOVPs */
	list_for_each_entry(its, &its_nodes, entry) {
		if (!is_v4(its))
			continue;

		if (!vpe->its_vm->vlpi_count[its->list_nr])
			continue;

		desc.its_vmovp_cmd.col = &its->collections[col_id];
		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
	}

	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}

static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
{
	struct its_cmd_desc desc;

	desc.its_vinvall_cmd.vpe = vpe;
	its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
}

/*
 * irqchip functions - assumes MSI, mostly.
 */

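/*
 * Update the config byte (priority/enable) for an LPI, either in the
 * physical LPI property table or, for a forwarded interrupt, in the
 * VM's virtual property page.
 */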
static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
{
	irq_hw_number_t hwirq;
	void *va;
	u8 *cfg;

	if (irqd_is_forwarded_to_vcpu(d)) {
		struct its_device *its_dev = irq_data_get_irq_chip_data(d);
		u32 event = its_get_event_id(d);
		struct its_vlpi_map *map;

		va = page_address(its_dev->event_map.vm->vprop_page);
		map = &its_dev->event_map.vlpi_maps[event];
		hwirq = map->vintid;

		/* Remember the updated property */
		map->properties &= ~clr;
		map->properties |= set | LPI_PROP_GROUP1;
	} else {
		va = gic_rdists->prop_table_va;
		hwirq = d->hwirq;
	}

	cfg = va + hwirq - 8192;
	*cfg &= ~clr;
	*cfg |= set | LPI_PROP_GROUP1;

	/*
	 * Make the above write visible to the redistributors.
	 * And yes, we're flushing exactly: One. Single. Byte.
	 * Humpf...
	 */
	if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
		gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
	else
		dsb(ishst);
}

static void wait_for_syncr(void __iomem *rdbase)
{
	while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
		cpu_relax();
}

static void direct_lpi_inv(struct irq_data *d)
{
	struct its_collection *col;
	void __iomem *rdbase;

	/* Target the redistributor this LPI is currently routed to */
	col = irq_to_col(d);
	rdbase = per_cpu_ptr(gic_rdists->rdist, col->col_id)->rd_base;
	gic_write_lpir(d->hwirq, rdbase + GICR_INVLPIR);

	wait_for_syncr(rdbase);
}

static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);

	lpi_write_config(d, clr, set);
	if (gic_rdists->has_direct_lpi && !irqd_is_forwarded_to_vcpu(d))
		direct_lpi_inv(d);
	else
		its_send_inv(its_dev, its_get_event_id(d));
}

static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);

	if (its_dev->event_map.vlpi_maps[event].db_enabled == enable)
		return;

	its_dev->event_map.vlpi_maps[event].db_enabled = enable;

	/*
	 * More fun with the architecture:
	 *
	 * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
	 * value or to 1023, depending on the enable bit. But that
	 * would be issuing a mapping for an /existing/ DevID+EventID
	 * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
	 * to the /same/ vPE, using this opportunity to adjust the
	 * doorbell. Mouahahahaha. We loves it, Precious.
	 */
	its_send_vmovi(its_dev, event);
}

static void its_mask_irq(struct irq_data *d)
{
	if (irqd_is_forwarded_to_vcpu(d))
		its_vlpi_set_doorbell(d, false);

	lpi_update_config(d, LPI_PROP_ENABLED, 0);
}

static void its_unmask_irq(struct irq_data *d)
{
	if (irqd_is_forwarded_to_vcpu(d))
		its_vlpi_set_doorbell(d, true);

	lpi_update_config(d, 0, LPI_PROP_ENABLED);
}

static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	unsigned int cpu;
	const struct cpumask *cpu_mask = cpu_online_mask;
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_collection *target_col;
	u32 id = its_get_event_id(d);

	/* A forwarded interrupt should use irq_set_vcpu_affinity */
	if (irqd_is_forwarded_to_vcpu(d))
		return -EINVAL;

	/* lpi cannot be routed to a redistributor that is on a foreign node */
	if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
		if (its_dev->its->numa_node >= 0) {
			cpu_mask = cpumask_of_node(its_dev->its->numa_node);
			if (!cpumask_intersects(mask_val, cpu_mask))
				return -EINVAL;
		}
	}

	cpu = cpumask_any_and(mask_val, cpu_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	/* don't set the affinity when the target cpu is the same as the current one */
	if (cpu != its_dev->event_map.col_map[id]) {
		target_col = &its_dev->its->collections[cpu];
		its_send_movi(its_dev, target_col, id);
		its_dev->event_map.col_map[id] = cpu;
		irq_data_update_effective_affinity(d, cpumask_of(cpu));
	}

	return IRQ_SET_MASK_OK_DONE;
}

static u64 its_irq_get_msi_base(struct its_device *its_dev)
{
	struct its_node *its = its_dev->its;

	return its->phys_base + GITS_TRANSLATER;
}

static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_node *its;
	u64 addr;

	its = its_dev->its;
	addr = its->get_msi_base(its_dev);

	msg->address_lo		= lower_32_bits(addr);
	msg->address_hi		= upper_32_bits(addr);
	msg->data		= its_get_event_id(d);

	iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg);
}

static int its_irq_set_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which,
				     bool state)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);

	if (which != IRQCHIP_STATE_PENDING)
		return -EINVAL;

	if (state)
		its_send_int(its_dev, event);
	else
		its_send_clear(its_dev, event);

	return 0;
}

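/*
 * its_map_vm()/its_unmap_vm() keep a per-ITS count of mapped VLPIs for a
 * VM; the VPEs are VMAPPed on the 0->1 transition and torn down again
 * when the last VLPI targeting that ITS goes away.
 */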
static void its_map_vm(struct its_node *its, struct its_vm *vm)
{
	unsigned long flags;

	/* Not using the ITS list? Everything is always mapped. */
	if (!its_list_map)
		return;

	raw_spin_lock_irqsave(&vmovp_lock, flags);

	/*
	 * If the VM wasn't mapped yet, iterate over the vpes and get
	 * them mapped now.
	 */
	vm->vlpi_count[its->list_nr]++;

	if (vm->vlpi_count[its->list_nr] == 1) {
		int i;

		for (i = 0; i < vm->nr_vpes; i++) {
			struct its_vpe *vpe = vm->vpes[i];
			struct irq_data *d = irq_get_irq_data(vpe->irq);

			/* Map the VPE to the first possible CPU */
			vpe->col_idx = cpumask_first(cpu_online_mask);
			its_send_vmapp(its, vpe, true);
			its_send_vinvall(its, vpe);
			irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
		}
	}

	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}

static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
{
	unsigned long flags;

	/* Not using the ITS list? Everything is always mapped. */
	if (!its_list_map)
		return;

	raw_spin_lock_irqsave(&vmovp_lock, flags);

	if (!--vm->vlpi_count[its->list_nr]) {
		int i;

		for (i = 0; i < vm->nr_vpes; i++)
			its_send_vmapp(its, vm->vpes[i], false);
	}

	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}

static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);
	int ret = 0;

	if (!info->map)
		return -EINVAL;

	mutex_lock(&its_dev->event_map.vlpi_lock);

	if (!its_dev->event_map.vm) {
		struct its_vlpi_map *maps;

		maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps),
			       GFP_KERNEL);
		if (!maps) {
			ret = -ENOMEM;
			goto out;
		}

		its_dev->event_map.vm = info->map->vm;
		its_dev->event_map.vlpi_maps = maps;
	} else if (its_dev->event_map.vm != info->map->vm) {
		ret = -EINVAL;
		goto out;
	}

	/* Get our private copy of the mapping information */
	its_dev->event_map.vlpi_maps[event] = *info->map;

	if (irqd_is_forwarded_to_vcpu(d)) {
		/* Already mapped, move it around */
		its_send_vmovi(its_dev, event);
	} else {
		/* Ensure all the VPEs are mapped on this ITS */
		its_map_vm(its_dev->its, info->map->vm);

		/*
		 * Flag the interrupt as forwarded so that we can
		 * start poking the virtual property table.
		 */
		irqd_set_forwarded_to_vcpu(d);

		/* Write out the property to the prop table */
		lpi_write_config(d, 0xff, info->map->properties);

		/* Drop the physical mapping */
		its_send_discard(its_dev, event);

		/* and install the virtual one */
		its_send_vmapti(its_dev, event);

		/* Increment the number of VLPIs */
		its_dev->event_map.nr_vlpis++;
	}

out:
	mutex_unlock(&its_dev->event_map.vlpi_lock);
	return ret;
}

static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);
	int ret = 0;

	mutex_lock(&its_dev->event_map.vlpi_lock);

	if (!its_dev->event_map.vm ||
	    !its_dev->event_map.vlpi_maps[event].vm) {
		ret = -EINVAL;
		goto out;
	}

	/* Copy our mapping information to the incoming request */
	*info->map = its_dev->event_map.vlpi_maps[event];

out:
	mutex_unlock(&its_dev->event_map.vlpi_lock);
	return ret;
}

static int its_vlpi_unmap(struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);
	int ret = 0;

	mutex_lock(&its_dev->event_map.vlpi_lock);

	if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
		ret = -EINVAL;
		goto out;
	}

	/* Drop the virtual mapping */
	its_send_discard(its_dev, event);

	/* and restore the physical one */
	irqd_clr_forwarded_to_vcpu(d);
	its_send_mapti(its_dev, d->hwirq, event);
	lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO |
				    LPI_PROP_ENABLED |
				    LPI_PROP_GROUP1));

	/* Potentially unmap the VM from this ITS */
	its_unmap_vm(its_dev->its, its_dev->event_map.vm);

	/*
	 * Drop the refcount and make the device available again if
	 * this was the last VLPI.
	 */
	if (!--its_dev->event_map.nr_vlpis) {
		its_dev->event_map.vm = NULL;
		kfree(its_dev->event_map.vlpi_maps);
	}

out:
	mutex_unlock(&its_dev->event_map.vlpi_lock);
	return ret;
}

static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);

	if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
		return -EINVAL;

	if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI)
		lpi_update_config(d, 0xff, info->config);
	else
		lpi_write_config(d, 0xff, info->config);
	its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));

	return 0;
}

static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_cmd_info *info = vcpu_info;

	/* Need a v4 ITS */
	if (!is_v4(its_dev->its))
		return -EINVAL;

	/* Unmap request? */
	if (!info)
		return its_vlpi_unmap(d);

	switch (info->cmd_type) {
	case MAP_VLPI:
		return its_vlpi_map(d, info);

	case GET_VLPI:
		return its_vlpi_get(d, info);

	case PROP_UPDATE_VLPI:
	case PROP_UPDATE_AND_INV_VLPI:
		return its_vlpi_prop_update(d, info);

	default:
		return -EINVAL;
	}
}

static struct irq_chip its_irq_chip = {
	.name			= "ITS",
	.irq_mask		= its_mask_irq,
	.irq_unmask		= its_unmask_irq,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= its_set_affinity,
	.irq_compose_msi_msg	= its_irq_compose_msi_msg,
	.irq_set_irqchip_state	= its_irq_set_irqchip_state,
	.irq_set_vcpu_affinity	= its_irq_set_vcpu_affinity,
};


Marc Zyngierbf9529f2014-11-24 14:35:13 +00001488/*
1489 * How we allocate LPIs:
1490 *
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001491 * lpi_range_list contains ranges of LPIs that are available to
1492 * allocate from. To allocate LPIs, just pick the first range that
1493 * fits the required allocation, and reduce it by the required
1494 * amount. Once empty, remove the range from the list.
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001495 *
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001496 * To free a range of LPIs, add a free range to the list, sort it and
1497 * merge the result if the new range happens to be adjacent to an
1498 * already free block.
1499 *
1500 * The consequence of the above is that allocation cost is low, but
1501 * freeing is expensive. We assume that freeing rarely occurs.
1502 */
Jia He4cb205c2018-08-28 12:53:26 +08001503#define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001504
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001505static DEFINE_MUTEX(lpi_range_lock);
1506static LIST_HEAD(lpi_range_list);
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001507
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001508struct lpi_range {
1509 struct list_head entry;
1510 u32 base_id;
1511 u32 span;
1512};
1513
1514static struct lpi_range *mk_lpi_range(u32 base, u32 span)
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001515{
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001516 struct lpi_range *range;
1517
Rasmus Villemoes1c73fac2019-03-12 18:33:48 +01001518 range = kmalloc(sizeof(*range), GFP_KERNEL);
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001519 if (range) {
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001520 range->base_id = base;
1521 range->span = span;
1522 }
1523
1524 return range;
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001525}
1526
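/*
 * Carve nr_lpis LPIs out of the first free range that is large enough,
 * returning the first allocated LPI in *base. Returns -ENOSPC if no
 * range can satisfy the request.
 */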
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001527static int alloc_lpi_range(u32 nr_lpis, u32 *base)
1528{
1529 struct lpi_range *range, *tmp;
1530 int err = -ENOSPC;
1531
1532 mutex_lock(&lpi_range_lock);
1533
1534 list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
1535 if (range->span >= nr_lpis) {
1536 *base = range->base_id;
1537 range->base_id += nr_lpis;
1538 range->span -= nr_lpis;
1539
1540 if (range->span == 0) {
1541 list_del(&range->entry);
1542 kfree(range);
1543 }
1544
1545 err = 0;
1546 break;
1547 }
1548 }
1549
1550 mutex_unlock(&lpi_range_lock);
1551
1552 pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis);
1553 return err;
1554}
1555
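/*
 * Coalesce two adjacent entries of the sorted free list: if 'a' ends
 * exactly where 'b' starts, fold 'a' into 'b' and free 'a'. Either
 * argument may be the list head, in which case nothing is done.
 */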
Rasmus Villemoes12eade12019-03-12 18:33:49 +01001556static void merge_lpi_ranges(struct lpi_range *a, struct lpi_range *b)
1557{
1558 if (&a->entry == &lpi_range_list || &b->entry == &lpi_range_list)
1559 return;
1560 if (a->base_id + a->span != b->base_id)
1561 return;
1562 b->base_id = a->base_id;
1563 b->span += a->span;
1564 list_del(&a->entry);
1565 kfree(a);
1566}
1567
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001568static int free_lpi_range(u32 base, u32 nr_lpis)
1569{
Rasmus Villemoes12eade12019-03-12 18:33:49 +01001570 struct lpi_range *new, *old;
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001571
1572 new = mk_lpi_range(base, nr_lpis);
Rasmus Villemoesb31a3832019-03-12 18:33:47 +01001573 if (!new)
1574 return -ENOMEM;
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001575
1576 mutex_lock(&lpi_range_lock);
1577
Rasmus Villemoes12eade12019-03-12 18:33:49 +01001578 list_for_each_entry_reverse(old, &lpi_range_list, entry) {
1579 if (old->base_id < base)
1580 break;
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001581 }
Rasmus Villemoes12eade12019-03-12 18:33:49 +01001582 /*
1583 * old is the last element with ->base_id smaller than base,
1584 * so new goes right after it. If there are no elements with
1585 * ->base_id smaller than base, &old->entry ends up pointing
1586	 * at the head of the list, and inserting new at the start of
1587 * the list is the right thing to do in that case as well.
1588 */
1589 list_add(&new->entry, &old->entry);
1590 /*
1591 * Now check if we can merge with the preceding and/or
1592 * following ranges.
1593 */
1594 merge_lpi_ranges(old, new);
1595 merge_lpi_ranges(new, list_next_entry(new, entry));
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001596
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001597 mutex_unlock(&lpi_range_lock);
Rasmus Villemoesb31a3832019-03-12 18:33:47 +01001598 return 0;
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001599}
1600
Tomasz Nowicki04a0e4d2016-01-19 14:11:18 +01001601static int __init its_lpi_init(u32 id_bits)
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001602{
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001603 u32 lpis = (1UL << id_bits) - 8192;
Marc Zyngier12b29052018-05-31 09:01:59 +01001604 u32 numlpis;
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001605 int err;
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001606
Marc Zyngier12b29052018-05-31 09:01:59 +01001607 numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer);
1608
1609 if (numlpis > 2 && !WARN_ON(numlpis > lpis)) {
1610 lpis = numlpis;
1611 pr_info("ITS: Using hypervisor restricted LPI range [%u]\n",
1612 lpis);
1613 }
1614
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001615 /*
1616 * Initializing the allocator is just the same as freeing the
1617 * full range of LPIs.
1618 */
1619 err = free_lpi_range(8192, lpis);
1620 pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis);
1621 return err;
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001622}
1623
Marc Zyngier38dd7c42018-05-27 17:03:03 +01001624static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids)
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001625{
1626 unsigned long *bitmap = NULL;
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001627 int err = 0;
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001628
1629 do {
Marc Zyngier38dd7c42018-05-27 17:03:03 +01001630 err = alloc_lpi_range(nr_irqs, base);
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001631 if (!err)
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001632 break;
1633
Marc Zyngier38dd7c42018-05-27 17:03:03 +01001634 nr_irqs /= 2;
1635 } while (nr_irqs > 0);
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001636
Marc Zyngier45725e02019-01-29 15:19:23 +00001637 if (!nr_irqs)
1638 err = -ENOSPC;
1639
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001640 if (err)
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001641 goto out;
1642
Marc Zyngier38dd7c42018-05-27 17:03:03 +01001643 bitmap = kcalloc(BITS_TO_LONGS(nr_irqs), sizeof (long), GFP_ATOMIC);
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001644 if (!bitmap)
1645 goto out;
1646
Marc Zyngier38dd7c42018-05-27 17:03:03 +01001647 *nr_ids = nr_irqs;
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001648
1649out:
Marc Zyngierc8415b92015-10-02 16:44:05 +01001650 if (!bitmap)
1651 *base = *nr_ids = 0;
1652
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001653 return bitmap;
1654}
1655
Marc Zyngier38dd7c42018-05-27 17:03:03 +01001656static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids)
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001657{
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001658 WARN_ON(free_lpi_range(base, nr_ids));
Marc Zyngiercf2be8b2016-12-19 18:49:59 +00001659 kfree(bitmap);
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001660}
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001661
Marc Zyngier053be482018-07-27 15:02:27 +01001662static void gic_reset_prop_table(void *va)
1663{
1664 /* Priority 0xa0, Group-1, disabled */
1665 memset(va, LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, LPI_PROPBASE_SZ);
1666
1667 /* Make sure the GIC will observe the written configuration */
1668 gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ);
1669}
1670
Marc Zyngier0e5ccf92016-12-19 18:15:05 +00001671static struct page *its_allocate_prop_table(gfp_t gfp_flags)
1672{
1673 struct page *prop_page;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001674
Marc Zyngier0e5ccf92016-12-19 18:15:05 +00001675 prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
1676 if (!prop_page)
1677 return NULL;
1678
Marc Zyngier053be482018-07-27 15:02:27 +01001679 gic_reset_prop_table(page_address(prop_page));
Marc Zyngier0e5ccf92016-12-19 18:15:05 +00001680
1681 return prop_page;
1682}
1683
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00001684static void its_free_prop_table(struct page *prop_page)
1685{
1686 free_pages((unsigned long)page_address(prop_page),
1687 get_order(LPI_PROPBASE_SZ));
1688}
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001689
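/*
 * Check that [addr, addr + size - 1] is fully contained within a
 * single memblock reserved region, which is what we expect of LPI
 * tables that were programmed before this kernel took over.
 */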
Marc Zyngier5e2c9f92018-07-27 16:23:18 +01001690static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size)
1691{
1692 phys_addr_t start, end, addr_end;
1693 u64 i;
1694
1695 /*
1696	 * We don't bother checking for a kdump kernel as, by
1697	 * construction, the LPI tables are out of this kernel's
1698 * memory map.
1699 */
1700 if (is_kdump_kernel())
1701 return true;
1702
1703 addr_end = addr + size - 1;
1704
1705 for_each_reserved_mem_region(i, &start, &end) {
1706 if (addr >= start && addr_end <= end)
1707 return true;
1708 }
1709
1710 /* Not found, not a good sign... */
1711 pr_warn("GICv3: Expected reserved range [%pa:%pa], not found\n",
1712 &addr, &addr_end);
1713 add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
1714 return false;
1715}
1716
Marc Zyngier3fb68fa2018-07-27 16:21:18 +01001717static int gic_reserve_range(phys_addr_t addr, unsigned long size)
1718{
1719 if (efi_enabled(EFI_CONFIG_TABLES))
1720 return efi_mem_reserve_persistent(addr, size);
1721
1722 return 0;
1723}
1724
Marc Zyngier11e37d32018-07-27 13:38:54 +01001725static int __init its_setup_lpi_prop_table(void)
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001726{
Marc Zyngierc440a9d2018-07-27 15:40:13 +01001727 if (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) {
1728 u64 val;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001729
Marc Zyngierc440a9d2018-07-27 15:40:13 +01001730 val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
1731 lpi_id_bits = (val & GICR_PROPBASER_IDBITS_MASK) + 1;
1732
1733 gic_rdists->prop_table_pa = val & GENMASK_ULL(51, 12);
1734 gic_rdists->prop_table_va = memremap(gic_rdists->prop_table_pa,
1735 LPI_PROPBASE_SZ,
1736 MEMREMAP_WB);
1737 gic_reset_prop_table(gic_rdists->prop_table_va);
1738 } else {
1739 struct page *page;
1740
1741 lpi_id_bits = min_t(u32,
1742 GICD_TYPER_ID_BITS(gic_rdists->gicd_typer),
1743 ITS_MAX_LPI_NRBITS);
1744 page = its_allocate_prop_table(GFP_NOWAIT);
1745 if (!page) {
1746 pr_err("Failed to allocate PROPBASE\n");
1747 return -ENOMEM;
1748 }
1749
1750 gic_rdists->prop_table_pa = page_to_phys(page);
1751 gic_rdists->prop_table_va = page_address(page);
Marc Zyngier3fb68fa2018-07-27 16:21:18 +01001752 WARN_ON(gic_reserve_range(gic_rdists->prop_table_pa,
1753 LPI_PROPBASE_SZ));
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001754 }
1755
Marc Zyngiere1a2e202018-07-27 14:36:00 +01001756 pr_info("GICv3: using LPI property table @%pa\n",
1757 &gic_rdists->prop_table_pa);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001758
Shanker Donthineni6c31e122017-06-22 18:19:14 -05001759 return its_lpi_init(lpi_id_bits);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001760}
1761
1762static const char *its_base_type_string[] = {
1763 [GITS_BASER_TYPE_DEVICE] = "Devices",
1764 [GITS_BASER_TYPE_VCPU] = "Virtual CPUs",
Marc Zyngier4f46de92016-12-20 15:50:14 +00001765 [GITS_BASER_TYPE_RESERVED3] = "Reserved (3)",
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001766 [GITS_BASER_TYPE_COLLECTION] = "Interrupt Collections",
1767 [GITS_BASER_TYPE_RESERVED5] = "Reserved (5)",
1768 [GITS_BASER_TYPE_RESERVED6] = "Reserved (6)",
1769 [GITS_BASER_TYPE_RESERVED7] = "Reserved (7)",
1770};
1771
Shanker Donthineni2d81d422016-06-06 18:17:28 -05001772static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
1773{
1774 u32 idx = baser - its->tables;
1775
Vladimir Murzin0968a612016-11-02 11:54:06 +00001776 return gits_read_baser(its->base + GITS_BASER + (idx << 3));
Shanker Donthineni2d81d422016-06-06 18:17:28 -05001777}
1778
1779static void its_write_baser(struct its_node *its, struct its_baser *baser,
1780 u64 val)
1781{
1782 u32 idx = baser - its->tables;
1783
Vladimir Murzin0968a612016-11-02 11:54:06 +00001784 gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
Shanker Donthineni2d81d422016-06-06 18:17:28 -05001785 baser->val = its_read_baser(its, baser);
1786}
1787
Shanker Donthineni93473592016-06-06 18:17:30 -05001788static int its_setup_baser(struct its_node *its, struct its_baser *baser,
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001789 u64 cache, u64 shr, u32 psz, u32 order,
1790 bool indirect)
Shanker Donthineni93473592016-06-06 18:17:30 -05001791{
1792 u64 val = its_read_baser(its, baser);
1793 u64 esz = GITS_BASER_ENTRY_SIZE(val);
1794 u64 type = GITS_BASER_TYPE(val);
Shanker Donthineni30ae9612017-10-09 11:46:55 -05001795 u64 baser_phys, tmp;
Shanker Donthineni93473592016-06-06 18:17:30 -05001796 u32 alloc_pages;
Shanker Donthineni539d3782019-01-14 09:50:19 +00001797 struct page *page;
Shanker Donthineni93473592016-06-06 18:17:30 -05001798 void *base;
Shanker Donthineni93473592016-06-06 18:17:30 -05001799
1800retry_alloc_baser:
1801 alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
1802 if (alloc_pages > GITS_BASER_PAGES_MAX) {
1803 pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
1804 &its->phys_base, its_base_type_string[type],
1805 alloc_pages, GITS_BASER_PAGES_MAX);
1806 alloc_pages = GITS_BASER_PAGES_MAX;
1807 order = get_order(GITS_BASER_PAGES_MAX * psz);
1808 }
1809
Shanker Donthineni539d3782019-01-14 09:50:19 +00001810 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
1811 if (!page)
Shanker Donthineni93473592016-06-06 18:17:30 -05001812 return -ENOMEM;
1813
Shanker Donthineni539d3782019-01-14 09:50:19 +00001814 base = (void *)page_address(page);
Shanker Donthineni30ae9612017-10-09 11:46:55 -05001815 baser_phys = virt_to_phys(base);
1816
1817 /* Check if the physical address of the memory is above 48bits */
1818 if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) {
1819
1820 /* 52bit PA is supported only when PageSize=64K */
1821 if (psz != SZ_64K) {
1822 pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
1823 free_pages((unsigned long)base, order);
1824 return -ENXIO;
1825 }
1826
1827 /* Convert 52bit PA to 48bit field */
1828 baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys);
1829 }
1830
Shanker Donthineni93473592016-06-06 18:17:30 -05001831retry_baser:
Shanker Donthineni30ae9612017-10-09 11:46:55 -05001832 val = (baser_phys |
Shanker Donthineni93473592016-06-06 18:17:30 -05001833 (type << GITS_BASER_TYPE_SHIFT) |
1834 ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
1835 ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) |
1836 cache |
1837 shr |
1838 GITS_BASER_VALID);
1839
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001840 val |= indirect ? GITS_BASER_INDIRECT : 0x0;
1841
Shanker Donthineni93473592016-06-06 18:17:30 -05001842 switch (psz) {
1843 case SZ_4K:
1844 val |= GITS_BASER_PAGE_SIZE_4K;
1845 break;
1846 case SZ_16K:
1847 val |= GITS_BASER_PAGE_SIZE_16K;
1848 break;
1849 case SZ_64K:
1850 val |= GITS_BASER_PAGE_SIZE_64K;
1851 break;
1852 }
1853
1854 its_write_baser(its, baser, val);
1855 tmp = baser->val;
1856
1857 if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
1858 /*
1859 * Shareability didn't stick. Just use
1860 * whatever the read reported, which is likely
1861		 * to be the only thing this ITS
1862 * supports. If that's zero, make it
1863 * non-cacheable as well.
1864 */
1865 shr = tmp & GITS_BASER_SHAREABILITY_MASK;
1866 if (!shr) {
1867 cache = GITS_BASER_nC;
Vladimir Murzin328191c2016-11-02 11:54:05 +00001868 gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
Shanker Donthineni93473592016-06-06 18:17:30 -05001869 }
1870 goto retry_baser;
1871 }
1872
1873 if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
1874 /*
1875 * Page size didn't stick. Let's try a smaller
1876 * size and retry. If we reach 4K, then
1877 * something is horribly wrong...
1878 */
1879 free_pages((unsigned long)base, order);
1880 baser->base = NULL;
1881
1882 switch (psz) {
1883 case SZ_16K:
1884 psz = SZ_4K;
1885 goto retry_alloc_baser;
1886 case SZ_64K:
1887 psz = SZ_16K;
1888 goto retry_alloc_baser;
1889 }
1890 }
1891
1892 if (val != tmp) {
Vladimir Murzinb11283e2016-11-02 11:54:03 +00001893 pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
Shanker Donthineni93473592016-06-06 18:17:30 -05001894 &its->phys_base, its_base_type_string[type],
Vladimir Murzinb11283e2016-11-02 11:54:03 +00001895 val, tmp);
Shanker Donthineni93473592016-06-06 18:17:30 -05001896 free_pages((unsigned long)base, order);
1897 return -ENXIO;
1898 }
1899
1900 baser->order = order;
1901 baser->base = base;
1902 baser->psz = psz;
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001903 tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;
Shanker Donthineni93473592016-06-06 18:17:30 -05001904
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001905 pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
Vladimir Murzind524eaa2016-11-02 11:54:04 +00001906 &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
Shanker Donthineni93473592016-06-06 18:17:30 -05001907 its_base_type_string[type],
1908 (unsigned long)virt_to_phys(base),
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001909 indirect ? "indirect" : "flat", (int)esz,
Shanker Donthineni93473592016-06-06 18:17:30 -05001910 psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
1911
1912 return 0;
1913}
1914
Marc Zyngier4cacac52016-12-19 18:18:34 +00001915static bool its_parse_indirect_baser(struct its_node *its,
1916 struct its_baser *baser,
Shanker Donthineni32bd44d2017-10-07 15:43:48 -05001917 u32 psz, u32 *order, u32 ids)
Shanker Donthineni4b75c452016-06-06 18:17:29 -05001918{
Marc Zyngier4cacac52016-12-19 18:18:34 +00001919 u64 tmp = its_read_baser(its, baser);
1920 u64 type = GITS_BASER_TYPE(tmp);
1921 u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
Shanker Donthineni2fd632a2017-01-25 21:51:41 -06001922 u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
Shanker Donthineni4b75c452016-06-06 18:17:29 -05001923 u32 new_order = *order;
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001924 bool indirect = false;
1925
1926 /* No need to enable Indirection if memory requirement < (psz*2)bytes */
1927 if ((esz << ids) > (psz * 2)) {
1928 /*
1929 * Find out whether hw supports a single or two-level table by
1930		 * reading bit at offset '62' after writing '1' to it.
1931 */
1932 its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
1933 indirect = !!(baser->val & GITS_BASER_INDIRECT);
1934
1935 if (indirect) {
1936 /*
1937 * The size of the lvl2 table is equal to ITS page size
1938 * which is 'psz'. For computing lvl1 table size,
1939			 * subtract the ID bits covered by a single lvl2 table
1940			 * from 'ids' (as reported by the ITS hardware), then
1941			 * multiply by the lvl1 table entry size.
1942 */
Vladimir Murzind524eaa2016-11-02 11:54:04 +00001943 ids -= ilog2(psz / (int)esz);
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001944 esz = GITS_LVL1_ENTRY_SIZE;
1945 }
1946 }
Shanker Donthineni4b75c452016-06-06 18:17:29 -05001947
1948 /*
1949 * Allocate as many entries as required to fit the
1950 * range of device IDs that the ITS can grok... The ID
1951 * space being incredibly sparse, this results in a
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001952	 * massive waste of memory if the two-level device table
1953 * feature is not supported by hardware.
Shanker Donthineni4b75c452016-06-06 18:17:29 -05001954 */
1955 new_order = max_t(u32, get_order(esz << ids), new_order);
1956 if (new_order >= MAX_ORDER) {
1957 new_order = MAX_ORDER - 1;
Vladimir Murzind524eaa2016-11-02 11:54:04 +00001958 ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
Marc Zyngier576a8342019-11-08 16:58:00 +00001959 pr_warn("ITS@%pa: %s Table too large, reduce ids %llu->%u\n",
Marc Zyngier4cacac52016-12-19 18:18:34 +00001960 &its->phys_base, its_base_type_string[type],
Marc Zyngier576a8342019-11-08 16:58:00 +00001961 device_ids(its), ids);
Shanker Donthineni4b75c452016-06-06 18:17:29 -05001962 }
1963
1964 *order = new_order;
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001965
1966 return indirect;
Shanker Donthineni4b75c452016-06-06 18:17:29 -05001967}
1968
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001969static void its_free_tables(struct its_node *its)
1970{
1971 int i;
1972
1973 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
Shanker Donthineni1a485f42016-02-01 20:19:44 -06001974 if (its->tables[i].base) {
1975 free_pages((unsigned long)its->tables[i].base,
1976 its->tables[i].order);
1977 its->tables[i].base = NULL;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001978 }
1979 }
1980}
1981
Shanker Donthineni0e0b0f62016-06-06 18:17:31 -05001982static int its_alloc_tables(struct its_node *its)
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001983{
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001984 u64 shr = GITS_BASER_InnerShareable;
Shanker Donthineni2fd632a2017-01-25 21:51:41 -06001985 u64 cache = GITS_BASER_RaWaWb;
Shanker Donthineni93473592016-06-06 18:17:30 -05001986 u32 psz = SZ_64K;
1987 int err, i;
Robert Richter94100972015-09-21 22:58:38 +02001988
Ard Biesheuvelfa150012017-10-17 17:55:54 +01001989 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)
1990 /* erratum 24313: ignore memory access type */
1991 cache = GITS_BASER_nCnB;
Shanker Donthineni466b7d12016-03-09 22:10:49 -06001992
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001993 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
Shanker Donthineni2d81d422016-06-06 18:17:28 -05001994 struct its_baser *baser = its->tables + i;
1995 u64 val = its_read_baser(its, baser);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001996 u64 type = GITS_BASER_TYPE(val);
Shanker Donthineni93473592016-06-06 18:17:30 -05001997 u32 order = get_order(psz);
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001998 bool indirect = false;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001999
Marc Zyngier4cacac52016-12-19 18:18:34 +00002000 switch (type) {
2001 case GITS_BASER_TYPE_NONE:
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002002 continue;
2003
Marc Zyngier4cacac52016-12-19 18:18:34 +00002004 case GITS_BASER_TYPE_DEVICE:
Shanker Donthineni32bd44d2017-10-07 15:43:48 -05002005 indirect = its_parse_indirect_baser(its, baser,
2006 psz, &order,
Marc Zyngier576a8342019-11-08 16:58:00 +00002007 device_ids(its));
Zenghui Yu8d565742019-02-10 05:24:10 +00002008 break;
2009
Marc Zyngier4cacac52016-12-19 18:18:34 +00002010 case GITS_BASER_TYPE_VCPU:
2011 indirect = its_parse_indirect_baser(its, baser,
Shanker Donthineni32bd44d2017-10-07 15:43:48 -05002012 psz, &order,
2013 ITS_MAX_VPEID_BITS);
Marc Zyngier4cacac52016-12-19 18:18:34 +00002014 break;
2015 }
Marc Zyngierf54b97e2015-03-06 16:37:41 +00002016
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002017 err = its_setup_baser(its, baser, cache, shr, psz, order, indirect);
Shanker Donthineni93473592016-06-06 18:17:30 -05002018 if (err < 0) {
2019 its_free_tables(its);
2020 return err;
Robert Richter30f21362015-09-21 22:58:34 +02002021 }
2022
Shanker Donthineni93473592016-06-06 18:17:30 -05002023 /* Update settings which will be used for next BASERn */
2024 psz = baser->psz;
2025 cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
2026 shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002027 }
2028
2029 return 0;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002030}
2031
2032static int its_alloc_collections(struct its_node *its)
2033{
Marc Zyngier83559b42018-06-22 10:52:52 +01002034 int i;
2035
Kees Cook6396bb22018-06-12 14:03:40 -07002036 its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002037 GFP_KERNEL);
2038 if (!its->collections)
2039 return -ENOMEM;
2040
Marc Zyngier83559b42018-06-22 10:52:52 +01002041 for (i = 0; i < nr_cpu_ids; i++)
2042 its->collections[i].target_address = ~0ULL;
2043
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002044 return 0;
2045}
2046
Marc Zyngier7c297a22016-12-19 18:34:38 +00002047static struct page *its_allocate_pending_table(gfp_t gfp_flags)
2048{
2049 struct page *pend_page;
Marc Zyngieradaab502018-07-17 18:06:39 +01002050
Marc Zyngier7c297a22016-12-19 18:34:38 +00002051 pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
Marc Zyngieradaab502018-07-17 18:06:39 +01002052 get_order(LPI_PENDBASE_SZ));
Marc Zyngier7c297a22016-12-19 18:34:38 +00002053 if (!pend_page)
2054 return NULL;
2055
2056 /* Make sure the GIC will observe the zero-ed page */
2057 gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);
2058
2059 return pend_page;
2060}
2061
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00002062static void its_free_pending_table(struct page *pt)
2063{
Marc Zyngieradaab502018-07-17 18:06:39 +01002064 free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ));
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00002065}
2066
Marc Zyngierc6e2ccb2018-06-26 11:21:11 +01002067/*
Marc Zyngier5e2c9f92018-07-27 16:23:18 +01002068 * Booting with kdump and LPIs enabled is generally fine. Any other
2069 * case is wrong in the absence of firmware/EFI support.
Marc Zyngierc6e2ccb2018-06-26 11:21:11 +01002070 */
Marc Zyngierc440a9d2018-07-27 15:40:13 +01002071static bool enabled_lpis_allowed(void)
2072{
Marc Zyngier5e2c9f92018-07-27 16:23:18 +01002073 phys_addr_t addr;
2074 u64 val;
Marc Zyngierc6e2ccb2018-06-26 11:21:11 +01002075
Marc Zyngier5e2c9f92018-07-27 16:23:18 +01002076 /* Check whether the property table is in a reserved region */
2077 val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
2078 addr = val & GENMASK_ULL(51, 12);
2079
2080 return gic_check_reserved_range(addr, LPI_PROPBASE_SZ);
Marc Zyngierc440a9d2018-07-27 15:40:13 +01002081}
2082
Marc Zyngier11e37d32018-07-27 13:38:54 +01002083static int __init allocate_lpi_tables(void)
2084{
Marc Zyngierc440a9d2018-07-27 15:40:13 +01002085 u64 val;
Marc Zyngier11e37d32018-07-27 13:38:54 +01002086 int err, cpu;
2087
Marc Zyngierc440a9d2018-07-27 15:40:13 +01002088 /*
2089 * If LPIs are enabled while we run this from the boot CPU,
2090 * flag the RD tables as pre-allocated if the stars do align.
2091 */
2092 val = readl_relaxed(gic_data_rdist_rd_base() + GICR_CTLR);
2093 if ((val & GICR_CTLR_ENABLE_LPIS) && enabled_lpis_allowed()) {
2094 gic_rdists->flags |= (RDIST_FLAGS_RD_TABLES_PREALLOCATED |
2095 RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING);
2096 pr_info("GICv3: Using preallocated redistributor tables\n");
2097 }
2098
Marc Zyngier11e37d32018-07-27 13:38:54 +01002099 err = its_setup_lpi_prop_table();
2100 if (err)
2101 return err;
2102
2103 /*
2104 * We allocate all the pending tables anyway, as we may have a
2105 * mix of RDs that have had LPIs enabled, and some that
2106	 * haven't. We'll free the unused ones as each CPU comes online.
2107 */
2108 for_each_possible_cpu(cpu) {
2109 struct page *pend_page;
2110
2111 pend_page = its_allocate_pending_table(GFP_NOWAIT);
2112 if (!pend_page) {
2113 pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu);
2114 return -ENOMEM;
2115 }
2116
2117 gic_data_rdist_cpu(cpu)->pend_page = pend_page;
2118 }
2119
2120 return 0;
2121}
2122
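/*
 * Clear the Valid bit in GICR_VPENDBASER and busy-wait (for up to a
 * second) for the Dirty bit to drop, returning the last value read.
 */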
Heyi Guo64794502019-01-24 21:37:08 +08002123static u64 its_clear_vpend_valid(void __iomem *vlpi_base)
2124{
2125 u32 count = 1000000; /* 1s! */
2126 bool clean;
2127 u64 val;
2128
2129 val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
2130 val &= ~GICR_VPENDBASER_Valid;
2131 gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
2132
2133 do {
2134 val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
2135 clean = !(val & GICR_VPENDBASER_Dirty);
2136 if (!clean) {
2137 count--;
2138 cpu_relax();
2139 udelay(1);
2140 }
2141 } while (!clean && count);
2142
2143 return val;
2144}
2145
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002146static void its_cpu_init_lpis(void)
2147{
2148 void __iomem *rbase = gic_data_rdist_rd_base();
2149 struct page *pend_page;
Marc Zyngier11e37d32018-07-27 13:38:54 +01002150 phys_addr_t paddr;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002151 u64 val, tmp;
2152
Marc Zyngier11e37d32018-07-27 13:38:54 +01002153 if (gic_data_rdist()->lpi_enabled)
2154 return;
2155
Marc Zyngierc440a9d2018-07-27 15:40:13 +01002156 val = readl_relaxed(rbase + GICR_CTLR);
2157 if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) &&
2158 (val & GICR_CTLR_ENABLE_LPIS)) {
Marc Zyngierf842ca82018-07-27 16:03:31 +01002159 /*
2160 * Check that we get the same property table on all
2161 * RDs. If we don't, this is hopeless.
2162 */
2163 paddr = gicr_read_propbaser(rbase + GICR_PROPBASER);
2164 paddr &= GENMASK_ULL(51, 12);
2165 if (WARN_ON(gic_rdists->prop_table_pa != paddr))
2166 add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
2167
Marc Zyngierc440a9d2018-07-27 15:40:13 +01002168 paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER);
2169 paddr &= GENMASK_ULL(51, 16);
2170
Marc Zyngier5e2c9f92018-07-27 16:23:18 +01002171 WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ));
Marc Zyngierc440a9d2018-07-27 15:40:13 +01002172 its_free_pending_table(gic_data_rdist()->pend_page);
2173 gic_data_rdist()->pend_page = NULL;
2174
2175 goto out;
2176 }
2177
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002178 pend_page = gic_data_rdist()->pend_page;
Marc Zyngier11e37d32018-07-27 13:38:54 +01002179 paddr = page_to_phys(pend_page);
Marc Zyngier3fb68fa2018-07-27 16:21:18 +01002180 WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ));
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002181
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002182 /* set PROPBASE */
Marc Zyngiere1a2e202018-07-27 14:36:00 +01002183 val = (gic_rdists->prop_table_pa |
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002184 GICR_PROPBASER_InnerShareable |
Shanker Donthineni2fd632a2017-01-25 21:51:41 -06002185 GICR_PROPBASER_RaWaWb |
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002186 ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
2187
Vladimir Murzin0968a612016-11-02 11:54:06 +00002188 gicr_write_propbaser(val, rbase + GICR_PROPBASER);
2189 tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002190
2191 if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
Marc Zyngier241a3862015-03-27 14:15:05 +00002192 if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
2193 /*
2194 * The HW reports non-shareable, we must
2195 * remove the cacheability attributes as
2196 * well.
2197 */
2198 val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
2199 GICR_PROPBASER_CACHEABILITY_MASK);
2200 val |= GICR_PROPBASER_nC;
Vladimir Murzin0968a612016-11-02 11:54:06 +00002201 gicr_write_propbaser(val, rbase + GICR_PROPBASER);
Marc Zyngier241a3862015-03-27 14:15:05 +00002202 }
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002203 pr_info_once("GIC: using cache flushing for LPI property table\n");
2204 gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
2205 }
2206
2207 /* set PENDBASE */
2208 val = (page_to_phys(pend_page) |
Marc Zyngier4ad3e362015-03-27 14:15:04 +00002209 GICR_PENDBASER_InnerShareable |
Shanker Donthineni2fd632a2017-01-25 21:51:41 -06002210 GICR_PENDBASER_RaWaWb);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002211
Vladimir Murzin0968a612016-11-02 11:54:06 +00002212 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
2213 tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
Marc Zyngier241a3862015-03-27 14:15:05 +00002214
2215 if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
2216 /*
2217 * The HW reports non-shareable, we must remove the
2218 * cacheability attributes as well.
2219 */
2220 val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
2221 GICR_PENDBASER_CACHEABILITY_MASK);
2222 val |= GICR_PENDBASER_nC;
Vladimir Murzin0968a612016-11-02 11:54:06 +00002223 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
Marc Zyngier241a3862015-03-27 14:15:05 +00002224 }
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002225
2226 /* Enable LPIs */
2227 val = readl_relaxed(rbase + GICR_CTLR);
2228 val |= GICR_CTLR_ENABLE_LPIS;
2229 writel_relaxed(val, rbase + GICR_CTLR);
2230
Heyi Guo64794502019-01-24 21:37:08 +08002231 if (gic_rdists->has_vlpis) {
2232 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2233
2234 /*
2235		 * It's possible for a CPU to receive VLPIs before it is
2236		 * scheduled as a vPE, especially for the first CPU, and a
2237		 * VLPI with an INTID larger than 2^(IDbits+1) will be considered
2238		 * as out of range and dropped by the GIC.
2239		 * So we initialize IDbits to a known value to avoid VLPI drops.
2240 */
2241 val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
2242 pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n",
2243 smp_processor_id(), val);
2244 gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2245
2246 /*
2247 * Also clear Valid bit of GICR_VPENDBASER, in case some
2248		 * ancient programming gets left in and could end up
2249		 * corrupting memory.
2250 */
2251 val = its_clear_vpend_valid(vlpi_base);
2252 WARN_ON(val & GICR_VPENDBASER_Dirty);
2253 }
2254
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002255 /* Make sure the GIC has seen the above */
2256 dsb(sy);
Marc Zyngierc440a9d2018-07-27 15:40:13 +01002257out:
Marc Zyngier11e37d32018-07-27 13:38:54 +01002258 gic_data_rdist()->lpi_enabled = true;
Marc Zyngierc440a9d2018-07-27 15:40:13 +01002259 pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n",
Marc Zyngier11e37d32018-07-27 13:38:54 +01002260 smp_processor_id(),
Marc Zyngierc440a9d2018-07-27 15:40:13 +01002261 gic_data_rdist()->pend_page ? "allocated" : "reserved",
Marc Zyngier11e37d32018-07-27 13:38:54 +01002262 &paddr);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002263}
2264
Derek Basehore920181c2018-02-28 21:48:20 -08002265static void its_cpu_init_collection(struct its_node *its)
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002266{
Derek Basehore920181c2018-02-28 21:48:20 -08002267 int cpu = smp_processor_id();
2268 u64 target;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002269
Derek Basehore920181c2018-02-28 21:48:20 -08002270 /* avoid cross node collections and its mapping */
2271 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
2272 struct device_node *cpu_node;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002273
Derek Basehore920181c2018-02-28 21:48:20 -08002274 cpu_node = of_get_cpu_node(cpu, NULL);
2275 if (its->numa_node != NUMA_NO_NODE &&
2276 its->numa_node != of_node_to_nid(cpu_node))
2277 return;
2278 }
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002279
Derek Basehore920181c2018-02-28 21:48:20 -08002280 /*
2281 * We now have to bind each collection to its target
2282 * redistributor.
2283 */
2284 if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002285 /*
Derek Basehore920181c2018-02-28 21:48:20 -08002286 * This ITS wants the physical address of the
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002287 * redistributor.
2288 */
Derek Basehore920181c2018-02-28 21:48:20 -08002289 target = gic_data_rdist()->phys_base;
2290 } else {
2291 /* This ITS wants a linear CPU number. */
2292 target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
2293 target = GICR_TYPER_CPU_NUMBER(target) << 16;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002294 }
2295
Derek Basehore920181c2018-02-28 21:48:20 -08002296 /* Perform collection mapping */
2297 its->collections[cpu].target_address = target;
2298 its->collections[cpu].col_id = cpu;
2299
2300 its_send_mapc(its, &its->collections[cpu], 1);
2301 its_send_invall(its, &its->collections[cpu]);
2302}
2303
2304static void its_cpu_init_collections(void)
2305{
2306 struct its_node *its;
2307
Sebastian Andrzej Siewiora8db7452018-07-18 17:42:04 +02002308 raw_spin_lock(&its_lock);
Derek Basehore920181c2018-02-28 21:48:20 -08002309
2310 list_for_each_entry(its, &its_nodes, entry)
2311 its_cpu_init_collection(its);
2312
Sebastian Andrzej Siewiora8db7452018-07-18 17:42:04 +02002313 raw_spin_unlock(&its_lock);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002314}
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002315
2316static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
2317{
2318 struct its_device *its_dev = NULL, *tmp;
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002319 unsigned long flags;
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002320
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002321 raw_spin_lock_irqsave(&its->lock, flags);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002322
2323 list_for_each_entry(tmp, &its->its_device_list, entry) {
2324 if (tmp->device_id == dev_id) {
2325 its_dev = tmp;
2326 break;
2327 }
2328 }
2329
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002330 raw_spin_unlock_irqrestore(&its->lock, flags);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002331
2332 return its_dev;
2333}
2334
Shanker Donthineni466b7d12016-03-09 22:10:49 -06002335static struct its_baser *its_get_baser(struct its_node *its, u32 type)
2336{
2337 int i;
2338
2339 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
2340 if (GITS_BASER_TYPE(its->tables[i].val) == type)
2341 return &its->tables[i];
2342 }
2343
2344 return NULL;
2345}
2346
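/*
 * Make sure the table entry for 'id' is backed by memory: for a flat
 * table, simply check that 'id' is within the allocated range; for a
 * two-level table, allocate and install the level-2 page on demand.
 * Returns false if 'id' is out of range or the allocation fails.
 */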
Shanker Donthineni539d3782019-01-14 09:50:19 +00002347static bool its_alloc_table_entry(struct its_node *its,
2348 struct its_baser *baser, u32 id)
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002349{
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002350 struct page *page;
2351 u32 esz, idx;
2352 __le64 *table;
2353
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002354 /* Don't allow device id that exceeds single, flat table limit */
2355 esz = GITS_BASER_ENTRY_SIZE(baser->val);
2356 if (!(baser->val & GITS_BASER_INDIRECT))
Marc Zyngier70cc81e2016-12-19 18:53:02 +00002357 return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002358
2359 /* Compute 1st level table index & check if that exceeds table limit */
Marc Zyngier70cc81e2016-12-19 18:53:02 +00002360 idx = id >> ilog2(baser->psz / esz);
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002361 if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
2362 return false;
2363
2364 table = baser->base;
2365
2366 /* Allocate memory for 2nd level table */
2367 if (!table[idx]) {
Shanker Donthineni539d3782019-01-14 09:50:19 +00002368 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
2369 get_order(baser->psz));
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002370 if (!page)
2371 return false;
2372
2373 /* Flush Lvl2 table to PoC if hw doesn't support coherency */
2374 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
Vladimir Murzin328191c2016-11-02 11:54:05 +00002375 gic_flush_dcache_to_poc(page_address(page), baser->psz);
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002376
2377 table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
2378
2379 /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
2380 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
Vladimir Murzin328191c2016-11-02 11:54:05 +00002381 gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002382
2383 /* Ensure updated table contents are visible to ITS hardware */
2384 dsb(sy);
2385 }
2386
2387 return true;
2388}
2389
Marc Zyngier70cc81e2016-12-19 18:53:02 +00002390static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
2391{
2392 struct its_baser *baser;
2393
2394 baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
2395
2396 /* Don't allow device id that exceeds ITS hardware limit */
2397 if (!baser)
Marc Zyngier576a8342019-11-08 16:58:00 +00002398 return (ilog2(dev_id) < device_ids(its));
Marc Zyngier70cc81e2016-12-19 18:53:02 +00002399
Shanker Donthineni539d3782019-01-14 09:50:19 +00002400 return its_alloc_table_entry(its, baser, dev_id);
Marc Zyngier70cc81e2016-12-19 18:53:02 +00002401}
2402
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00002403static bool its_alloc_vpe_table(u32 vpe_id)
2404{
2405 struct its_node *its;
2406
2407 /*
2408 * Make sure the L2 tables are allocated on *all* v4 ITSs. We
2409 * could try and only do it on ITSs corresponding to devices
2410 * that have interrupts targeted at this VPE, but the
2411 * complexity becomes crazy (and you have tons of memory
2412 * anyway, right?).
2413 */
2414 list_for_each_entry(its, &its_nodes, entry) {
2415 struct its_baser *baser;
2416
Marc Zyngier0dd57fe2019-11-08 16:57:58 +00002417 if (!is_v4(its))
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00002418 continue;
2419
2420 baser = its_get_baser(its, GITS_BASER_TYPE_VCPU);
2421 if (!baser)
2422 return false;
2423
Shanker Donthineni539d3782019-01-14 09:50:19 +00002424 if (!its_alloc_table_entry(its, baser, vpe_id))
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00002425 return false;
2426 }
2427
2428 return true;
2429}
2430
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002431static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002432 int nvecs, bool alloc_lpis)
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002433{
2434 struct its_device *dev;
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002435 unsigned long *lpi_map = NULL;
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002436 unsigned long flags;
Marc Zyngier591e5be2015-07-17 10:46:42 +01002437 u16 *col_map = NULL;
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002438 void *itt;
2439 int lpi_base;
2440 int nr_lpis;
Marc Zyngierc8481262014-12-12 10:51:24 +00002441 int nr_ites;
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002442 int sz;
2443
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002444 if (!its_alloc_device_table(its, dev_id))
Shanker Donthineni466b7d12016-03-09 22:10:49 -06002445 return NULL;
2446
Marc Zyngier147c8f32018-05-27 16:39:55 +01002447 if (WARN_ON(!is_power_of_2(nvecs)))
2448 nvecs = roundup_pow_of_two(nvecs);
2449
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002450 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
Marc Zyngierc8481262014-12-12 10:51:24 +00002451 /*
Marc Zyngier147c8f32018-05-27 16:39:55 +01002452 * Even if the device wants a single LPI, the ITT must be
2453 * sized as a power of two (and you need at least one bit...).
Marc Zyngierc8481262014-12-12 10:51:24 +00002454 */
Marc Zyngier147c8f32018-05-27 16:39:55 +01002455 nr_ites = max(2, nvecs);
Marc Zyngierffedbf02019-11-08 16:57:59 +00002456 sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002457 sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
Shanker Donthineni539d3782019-01-14 09:50:19 +00002458 itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002459 if (alloc_lpis) {
Marc Zyngier38dd7c42018-05-27 17:03:03 +01002460 lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002461 if (lpi_map)
Kees Cook6396bb22018-06-12 14:03:40 -07002462 col_map = kcalloc(nr_lpis, sizeof(*col_map),
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002463 GFP_KERNEL);
2464 } else {
Kees Cook6396bb22018-06-12 14:03:40 -07002465 col_map = kcalloc(nr_ites, sizeof(*col_map), GFP_KERNEL);
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002466 nr_lpis = 0;
2467 lpi_base = 0;
2468 }
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002469
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002470 if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002471 kfree(dev);
2472 kfree(itt);
2473 kfree(lpi_map);
Marc Zyngier591e5be2015-07-17 10:46:42 +01002474 kfree(col_map);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002475 return NULL;
2476 }
2477
Vladimir Murzin328191c2016-11-02 11:54:05 +00002478 gic_flush_dcache_to_poc(itt, sz);
Marc Zyngier5a9a8912015-09-13 12:14:32 +01002479
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002480 dev->its = its;
2481 dev->itt = itt;
Marc Zyngierc8481262014-12-12 10:51:24 +00002482 dev->nr_ites = nr_ites;
Marc Zyngier591e5be2015-07-17 10:46:42 +01002483 dev->event_map.lpi_map = lpi_map;
2484 dev->event_map.col_map = col_map;
2485 dev->event_map.lpi_base = lpi_base;
2486 dev->event_map.nr_lpis = nr_lpis;
Marc Zyngierd011e4e2016-12-20 09:44:41 +00002487 mutex_init(&dev->event_map.vlpi_lock);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002488 dev->device_id = dev_id;
2489 INIT_LIST_HEAD(&dev->entry);
2490
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002491 raw_spin_lock_irqsave(&its->lock, flags);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002492 list_add(&dev->entry, &its->its_device_list);
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002493 raw_spin_unlock_irqrestore(&its->lock, flags);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002494
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002495 /* Map device to its ITT */
2496 its_send_mapd(dev, 1);
2497
2498 return dev;
2499}
2500
2501static void its_free_device(struct its_device *its_dev)
2502{
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002503 unsigned long flags;
2504
2505 raw_spin_lock_irqsave(&its_dev->its->lock, flags);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002506 list_del(&its_dev->entry);
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002507 raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
Marc Zyngier898aa5c2019-11-08 16:57:55 +00002508 kfree(its_dev->event_map.col_map);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002509 kfree(its_dev->itt);
2510 kfree(its_dev);
2511}
Marc Zyngierb48ac832014-11-24 14:35:16 +00002512
Marc Zyngier8208d172019-01-18 14:08:59 +00002513static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq)
Marc Zyngierb48ac832014-11-24 14:35:16 +00002514{
2515 int idx;
2516
Zenghui Yu342be102019-07-27 06:14:22 +00002517	/* Find a free LPI region in lpi_map and allocate it. */
Marc Zyngier8208d172019-01-18 14:08:59 +00002518 idx = bitmap_find_free_region(dev->event_map.lpi_map,
2519 dev->event_map.nr_lpis,
2520 get_count_order(nvecs));
2521 if (idx < 0)
Marc Zyngierb48ac832014-11-24 14:35:16 +00002522 return -ENOSPC;
2523
Marc Zyngier591e5be2015-07-17 10:46:42 +01002524 *hwirq = dev->event_map.lpi_base + idx;
Marc Zyngierb48ac832014-11-24 14:35:16 +00002525
Marc Zyngierb48ac832014-11-24 14:35:16 +00002526 return 0;
2527}
2528
Marc Zyngier54456db2015-07-28 14:46:21 +01002529static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
2530 int nvec, msi_alloc_info_t *info)
Marc Zyngiere8137f42015-03-06 16:37:42 +00002531{
Marc Zyngierb48ac832014-11-24 14:35:16 +00002532 struct its_node *its;
Marc Zyngierb48ac832014-11-24 14:35:16 +00002533 struct its_device *its_dev;
Marc Zyngier54456db2015-07-28 14:46:21 +01002534 struct msi_domain_info *msi_info;
2535 u32 dev_id;
Marc Zyngier9791ec72019-01-29 10:02:33 +00002536 int err = 0;
Marc Zyngierb48ac832014-11-24 14:35:16 +00002537
Marc Zyngier54456db2015-07-28 14:46:21 +01002538 /*
Julien Gralla7c90f52019-04-18 16:58:14 +01002539 * We ignore "dev" entirely, and rely on the dev_id that has
Marc Zyngier54456db2015-07-28 14:46:21 +01002540 * been passed via the scratchpad. This limits this domain's
2541 * usefulness to upper layers that definitely know that they
2542 * are built on top of the ITS.
2543 */
2544 dev_id = info->scratchpad[0].ul;
2545
2546 msi_info = msi_get_domain_info(domain);
2547 its = msi_info->data;
2548
Marc Zyngier20b3d542016-12-20 15:23:22 +00002549 if (!gic_rdists->has_direct_lpi &&
2550 vpe_proxy.dev &&
2551 vpe_proxy.dev->its == its &&
2552 dev_id == vpe_proxy.dev->device_id) {
2553 /* Bad luck. Get yourself a better implementation */
2554 WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n",
2555 dev_id);
2556 return -EINVAL;
2557 }
2558
Marc Zyngier9791ec72019-01-29 10:02:33 +00002559 mutex_lock(&its->dev_alloc_lock);
Marc Zyngierf1304202015-07-28 14:46:18 +01002560 its_dev = its_find_device(its, dev_id);
Marc Zyngiere8137f42015-03-06 16:37:42 +00002561 if (its_dev) {
2562 /*
2563 * We already have seen this ID, probably through
2564 * another alias (PCI bridge of some sort). No need to
2565 * create the device.
2566 */
Marc Zyngier9791ec72019-01-29 10:02:33 +00002567 its_dev->shared = true;
Marc Zyngierf1304202015-07-28 14:46:18 +01002568 pr_debug("Reusing ITT for devID %x\n", dev_id);
Marc Zyngiere8137f42015-03-06 16:37:42 +00002569 goto out;
2570 }
Marc Zyngierb48ac832014-11-24 14:35:16 +00002571
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002572 its_dev = its_create_device(its, dev_id, nvec, true);
Marc Zyngier9791ec72019-01-29 10:02:33 +00002573 if (!its_dev) {
2574 err = -ENOMEM;
2575 goto out;
2576 }
Marc Zyngierb48ac832014-11-24 14:35:16 +00002577
Marc Zyngierf1304202015-07-28 14:46:18 +01002578 pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
Marc Zyngiere8137f42015-03-06 16:37:42 +00002579out:
Marc Zyngier9791ec72019-01-29 10:02:33 +00002580 mutex_unlock(&its->dev_alloc_lock);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002581 info->scratchpad[0].ptr = its_dev;
Marc Zyngier9791ec72019-01-29 10:02:33 +00002582 return err;
Marc Zyngierb48ac832014-11-24 14:35:16 +00002583}
2584
Marc Zyngier54456db2015-07-28 14:46:21 +01002585static struct msi_domain_ops its_msi_domain_ops = {
2586 .msi_prepare = its_msi_prepare,
2587};
2588
Marc Zyngierb48ac832014-11-24 14:35:16 +00002589static int its_irq_gic_domain_alloc(struct irq_domain *domain,
2590 unsigned int virq,
2591 irq_hw_number_t hwirq)
2592{
Marc Zyngierf833f572015-10-13 12:51:33 +01002593 struct irq_fwspec fwspec;
Marc Zyngierb48ac832014-11-24 14:35:16 +00002594
Marc Zyngierf833f572015-10-13 12:51:33 +01002595 if (irq_domain_get_of_node(domain->parent)) {
2596 fwspec.fwnode = domain->parent->fwnode;
2597 fwspec.param_count = 3;
2598 fwspec.param[0] = GIC_IRQ_TYPE_LPI;
2599 fwspec.param[1] = hwirq;
2600 fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
Tomasz Nowicki3f010cf2016-09-12 20:32:25 +02002601 } else if (is_fwnode_irqchip(domain->parent->fwnode)) {
2602 fwspec.fwnode = domain->parent->fwnode;
2603 fwspec.param_count = 2;
2604 fwspec.param[0] = hwirq;
2605 fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
Marc Zyngierf833f572015-10-13 12:51:33 +01002606 } else {
2607 return -EINVAL;
2608 }
Marc Zyngierb48ac832014-11-24 14:35:16 +00002609
Marc Zyngierf833f572015-10-13 12:51:33 +01002610 return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002611}
2612
2613static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
2614 unsigned int nr_irqs, void *args)
2615{
2616 msi_alloc_info_t *info = args;
2617 struct its_device *its_dev = info->scratchpad[0].ptr;
Julien Grall35ae7df2019-05-01 14:58:21 +01002618 struct its_node *its = its_dev->its;
Marc Zyngierb48ac832014-11-24 14:35:16 +00002619 irq_hw_number_t hwirq;
2620 int err;
2621 int i;
2622
Marc Zyngier8208d172019-01-18 14:08:59 +00002623 err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq);
2624 if (err)
2625 return err;
Marc Zyngierb48ac832014-11-24 14:35:16 +00002626
Julien Grall35ae7df2019-05-01 14:58:21 +01002627 err = iommu_dma_prepare_msi(info->desc, its->get_msi_base(its_dev));
2628 if (err)
2629 return err;
2630
Marc Zyngier8208d172019-01-18 14:08:59 +00002631 for (i = 0; i < nr_irqs; i++) {
2632 err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002633 if (err)
2634 return err;
2635
2636 irq_domain_set_hwirq_and_chip(domain, virq + i,
Marc Zyngier8208d172019-01-18 14:08:59 +00002637 hwirq + i, &its_irq_chip, its_dev);
Marc Zyngier0d224d32017-08-18 09:39:18 +01002638 irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i)));
Marc Zyngierf1304202015-07-28 14:46:18 +01002639 pr_debug("ID:%d pID:%d vID:%d\n",
Marc Zyngier8208d172019-01-18 14:08:59 +00002640 (int)(hwirq + i - its_dev->event_map.lpi_base),
2641 (int)(hwirq + i), virq + i);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002642 }
2643
2644 return 0;
2645}
2646
Thomas Gleixner72491642017-09-13 23:29:10 +02002647static int its_irq_domain_activate(struct irq_domain *domain,
Thomas Gleixner702cb0a2017-12-29 16:59:06 +01002648 struct irq_data *d, bool reserve)
Marc Zyngieraca268d2014-12-12 10:51:23 +00002649{
2650 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2651 u32 event = its_get_event_id(d);
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +02002652 const struct cpumask *cpu_mask = cpu_online_mask;
Marc Zyngier0d224d32017-08-18 09:39:18 +01002653 int cpu;
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +02002654
2655 /* get the cpu_mask of local node */
2656 if (its_dev->its->numa_node >= 0)
2657 cpu_mask = cpumask_of_node(its_dev->its->numa_node);
Marc Zyngieraca268d2014-12-12 10:51:23 +00002658
Marc Zyngier591e5be2015-07-17 10:46:42 +01002659 /* Bind the LPI to the first possible CPU */
Yang Yingliangc1797b12018-06-22 10:52:51 +01002660 cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
2661 if (cpu >= nr_cpu_ids) {
2662 if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144)
2663 return -EINVAL;
2664
2665 cpu = cpumask_first(cpu_online_mask);
2666 }
2667
Marc Zyngier0d224d32017-08-18 09:39:18 +01002668 its_dev->event_map.col_map[event] = cpu;
2669 irq_data_update_effective_affinity(d, cpumask_of(cpu));
Marc Zyngier591e5be2015-07-17 10:46:42 +01002670
Marc Zyngieraca268d2014-12-12 10:51:23 +00002671 /* Map the GIC IRQ and event to the device */
Marc Zyngier6a25ad32016-12-20 15:52:26 +00002672 its_send_mapti(its_dev, d->hwirq, event);
Thomas Gleixner72491642017-09-13 23:29:10 +02002673 return 0;
Marc Zyngieraca268d2014-12-12 10:51:23 +00002674}
2675
2676static void its_irq_domain_deactivate(struct irq_domain *domain,
2677 struct irq_data *d)
2678{
2679 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2680 u32 event = its_get_event_id(d);
2681
2682 /* Stop the delivery of interrupts */
2683 its_send_discard(its_dev, event);
2684}
2685
Marc Zyngierb48ac832014-11-24 14:35:16 +00002686static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
2687 unsigned int nr_irqs)
2688{
2689 struct irq_data *d = irq_domain_get_irq_data(domain, virq);
2690 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
Marc Zyngier9791ec72019-01-29 10:02:33 +00002691 struct its_node *its = its_dev->its;
Marc Zyngierb48ac832014-11-24 14:35:16 +00002692 int i;
2693
Marc Zyngierc9c96e32019-09-05 14:56:47 +01002694 bitmap_release_region(its_dev->event_map.lpi_map,
2695 its_get_event_id(irq_domain_get_irq_data(domain, virq)),
2696 get_count_order(nr_irqs));
2697
Marc Zyngierb48ac832014-11-24 14:35:16 +00002698 for (i = 0; i < nr_irqs; i++) {
2699 struct irq_data *data = irq_domain_get_irq_data(domain,
2700 virq + i);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002701 /* Nuke the entry in the domain */
Marc Zyngier2da39942014-12-12 10:51:22 +00002702 irq_domain_reset_irq_data(data);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002703 }
2704
Marc Zyngier9791ec72019-01-29 10:02:33 +00002705 mutex_lock(&its->dev_alloc_lock);
2706
2707 /*
2708 * If all interrupts have been freed, start mopping the
2709	 * floor. This is conditioned on the device not being shared.
2710 */
2711 if (!its_dev->shared &&
2712 bitmap_empty(its_dev->event_map.lpi_map,
Marc Zyngier591e5be2015-07-17 10:46:42 +01002713 its_dev->event_map.nr_lpis)) {
Marc Zyngier38dd7c42018-05-27 17:03:03 +01002714 its_lpi_free(its_dev->event_map.lpi_map,
2715 its_dev->event_map.lpi_base,
2716 its_dev->event_map.nr_lpis);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002717
2718 /* Unmap device/itt */
2719 its_send_mapd(its_dev, 0);
2720 its_free_device(its_dev);
2721 }
2722
Marc Zyngier9791ec72019-01-29 10:02:33 +00002723 mutex_unlock(&its->dev_alloc_lock);
2724
Marc Zyngierb48ac832014-11-24 14:35:16 +00002725 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
2726}
2727
2728static const struct irq_domain_ops its_domain_ops = {
2729 .alloc = its_irq_domain_alloc,
2730 .free = its_irq_domain_free,
Marc Zyngieraca268d2014-12-12 10:51:23 +00002731 .activate = its_irq_domain_activate,
2732 .deactivate = its_irq_domain_deactivate,
Marc Zyngierb48ac832014-11-24 14:35:16 +00002733};
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00002734
Marc Zyngier20b3d542016-12-20 15:23:22 +00002735/*
2736 * This is insane.
2737 *
2738 * If a GICv4 doesn't implement Direct LPIs (which is extremely
2739 * likely), the only way to perform an invalidate is to use a fake
2740 * device to issue an INV command, implying that the LPI has first
2741 * been mapped to some event on that device. Since this is not exactly
2742 * cheap, we try to keep that mapping around as long as possible, and
2743 * only issue an UNMAP if we're short on available slots.
2744 *
2745 * Broken by design(tm).
2746 */
2747static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
2748{
2749 /* Already unmapped? */
2750 if (vpe->vpe_proxy_event == -1)
2751 return;
2752
2753 its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event);
2754 vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL;
2755
2756 /*
2757 * We don't track empty slots at all, so let's move the
2758 * next_victim pointer if we can quickly reuse that slot
2759 * instead of nuking an existing entry. Not clear that this is
2760 * always a win though, and this might just generate a ripple
2761 * effect... Let's just hope VPEs don't migrate too often.
2762 */
2763 if (vpe_proxy.vpes[vpe_proxy.next_victim])
2764 vpe_proxy.next_victim = vpe->vpe_proxy_event;
2765
2766 vpe->vpe_proxy_event = -1;
2767}
2768
2769static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
2770{
2771 if (!gic_rdists->has_direct_lpi) {
2772 unsigned long flags;
2773
2774 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2775 its_vpe_db_proxy_unmap_locked(vpe);
2776 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2777 }
2778}
2779
2780static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
2781{
2782 /* Already mapped? */
2783 if (vpe->vpe_proxy_event != -1)
2784 return;
2785
2786 /* This slot was already allocated. Kick the other VPE out. */
2787 if (vpe_proxy.vpes[vpe_proxy.next_victim])
2788 its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]);
2789
2790 /* Map the new VPE instead */
2791 vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
2792 vpe->vpe_proxy_event = vpe_proxy.next_victim;
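	/* Advance the victim slot round-robin over the proxy device's ITT */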
2793 vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites;
2794
2795 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx;
2796 its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event);
2797}
2798
Marc Zyngier958b90d2017-08-18 16:14:17 +01002799static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
2800{
2801 unsigned long flags;
2802 struct its_collection *target_col;
2803
2804 if (gic_rdists->has_direct_lpi) {
2805 void __iomem *rdbase;
2806
2807 rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base;
2808 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
Marc Zyngier2f4f0642019-11-08 16:57:56 +00002809 wait_for_syncr(rdbase);
Marc Zyngier958b90d2017-08-18 16:14:17 +01002810
2811 return;
2812 }
2813
2814 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2815
2816 its_vpe_db_proxy_map_locked(vpe);
2817
2818 target_col = &vpe_proxy.dev->its->collections[to];
2819 its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event);
2820 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to;
2821
2822 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2823}
2824
Marc Zyngier3171a472016-12-20 15:17:28 +00002825static int its_vpe_set_affinity(struct irq_data *d,
2826 const struct cpumask *mask_val,
2827 bool force)
2828{
2829 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2830 int cpu = cpumask_first(mask_val);
2831
2832 /*
2833 * Changing affinity is mega expensive, so let's be as lazy as
Marc Zyngier20b3d542016-12-20 15:23:22 +00002834 * we can and only do it if we really have to. Also, if mapped
Marc Zyngier958b90d2017-08-18 16:14:17 +01002835 * into the proxy device, we need to move the doorbell
2836 * interrupt to its new location.
Marc Zyngier3171a472016-12-20 15:17:28 +00002837 */
2838 if (vpe->col_idx != cpu) {
Marc Zyngier958b90d2017-08-18 16:14:17 +01002839 int from = vpe->col_idx;
2840
Marc Zyngier3171a472016-12-20 15:17:28 +00002841 vpe->col_idx = cpu;
2842 its_send_vmovp(vpe);
Marc Zyngier958b90d2017-08-18 16:14:17 +01002843 its_vpe_db_proxy_move(vpe, from, cpu);
Marc Zyngier3171a472016-12-20 15:17:28 +00002844 }
2845
Marc Zyngier44c4c252017-10-19 10:11:34 +01002846 irq_data_update_effective_affinity(d, cpumask_of(cpu));
2847
Marc Zyngier3171a472016-12-20 15:17:28 +00002848 return IRQ_SET_MASK_OK_DONE;
2849}
2850
Marc Zyngiere643d802016-12-20 15:09:31 +00002851static void its_vpe_schedule(struct its_vpe *vpe)
2852{
Robin Murphy50c33092018-02-16 16:57:56 +00002853 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
Marc Zyngiere643d802016-12-20 15:09:31 +00002854 u64 val;
2855
2856 /* Schedule the VPE */
2857 val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
2858 GENMASK_ULL(51, 12);
2859 val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
2860 val |= GICR_VPROPBASER_RaWb;
2861 val |= GICR_VPROPBASER_InnerShareable;
2862 gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2863
2864 val = virt_to_phys(page_address(vpe->vpt_page)) &
2865 GENMASK_ULL(51, 16);
2866 val |= GICR_VPENDBASER_RaWaWb;
2867 val |= GICR_VPENDBASER_NonShareable;
2868 /*
2869 * There is no good way of finding out if the pending table is
2870 * empty as we can race against the doorbell interrupt very
2871 * easily. So in the end, vpe->pending_last is only an
2872 * indication that the vcpu has something pending, not one
2873 * that the pending table is empty. A good implementation
2874 * would be able to read its coarse map pretty quickly anyway,
2875 * making this a tolerable issue.
2876 */
2877 val |= GICR_VPENDBASER_PendingLast;
2878 val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
2879 val |= GICR_VPENDBASER_Valid;
2880 gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
2881}
2882
2883static void its_vpe_deschedule(struct its_vpe *vpe)
2884{
Robin Murphy50c33092018-02-16 16:57:56 +00002885 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
Marc Zyngiere643d802016-12-20 15:09:31 +00002886 u64 val;
2887
Heyi Guo64794502019-01-24 21:37:08 +08002888 val = its_clear_vpend_valid(vlpi_base);
Marc Zyngiere643d802016-12-20 15:09:31 +00002889
Heyi Guo64794502019-01-24 21:37:08 +08002890 if (unlikely(val & GICR_VPENDBASER_Dirty)) {
Marc Zyngiere643d802016-12-20 15:09:31 +00002891 pr_err_ratelimited("ITS virtual pending table not cleaning\n");
2892 vpe->idai = false;
2893 vpe->pending_last = true;
2894 } else {
2895 vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
2896 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
2897 }
2898}
2899
Marc Zyngier40619a22017-10-08 15:16:09 +01002900static void its_vpe_invall(struct its_vpe *vpe)
2901{
2902 struct its_node *its;
2903
2904 list_for_each_entry(its, &its_nodes, entry) {
Marc Zyngier0dd57fe2019-11-08 16:57:58 +00002905 if (!is_v4(its))
Marc Zyngier40619a22017-10-08 15:16:09 +01002906 continue;
2907
Marc Zyngier2247e1b2017-10-08 18:50:36 +01002908 if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr])
2909 continue;
2910
Marc Zyngier3c1ccee2017-10-09 13:17:43 +01002911 /*
2912 * Sending a VINVALL to a single ITS is enough, as all
2913 * we need is to reach the redistributors.
2914 */
Marc Zyngier40619a22017-10-08 15:16:09 +01002915 its_send_vinvall(its, vpe);
Marc Zyngier3c1ccee2017-10-09 13:17:43 +01002916 return;
Marc Zyngier40619a22017-10-08 15:16:09 +01002917 }
2918}
2919
Marc Zyngiere643d802016-12-20 15:09:31 +00002920static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
2921{
2922 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2923 struct its_cmd_info *info = vcpu_info;
2924
2925 switch (info->cmd_type) {
2926 case SCHEDULE_VPE:
2927 its_vpe_schedule(vpe);
2928 return 0;
2929
2930 case DESCHEDULE_VPE:
2931 its_vpe_deschedule(vpe);
2932 return 0;
2933
Marc Zyngier5e2f7642016-12-20 15:10:50 +00002934 case INVALL_VPE:
Marc Zyngier40619a22017-10-08 15:16:09 +01002935 its_vpe_invall(vpe);
Marc Zyngier5e2f7642016-12-20 15:10:50 +00002936 return 0;
2937
Marc Zyngiere643d802016-12-20 15:09:31 +00002938 default:
2939 return -EINVAL;
2940 }
2941}
2942
Marc Zyngier20b3d542016-12-20 15:23:22 +00002943static void its_vpe_send_cmd(struct its_vpe *vpe,
2944 void (*cmd)(struct its_device *, u32))
2945{
2946 unsigned long flags;
2947
2948 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2949
2950 its_vpe_db_proxy_map_locked(vpe);
2951 cmd(vpe_proxy.dev, vpe->vpe_proxy_event);
2952
2953 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2954}
2955
Marc Zyngierf6a91da2016-12-20 15:20:38 +00002956static void its_vpe_send_inv(struct irq_data *d)
2957{
2958 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
Marc Zyngierf6a91da2016-12-20 15:20:38 +00002959
Marc Zyngier20b3d542016-12-20 15:23:22 +00002960 if (gic_rdists->has_direct_lpi) {
2961 void __iomem *rdbase;
2962
Marc Zyngier425c09b2019-11-08 16:57:57 +00002963 /* Target the redistributor this VPE is currently known on */
Marc Zyngier20b3d542016-12-20 15:23:22 +00002964 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
Marc Zyngier425c09b2019-11-08 16:57:57 +00002965 gic_write_lpir(d->parent_data->hwirq, rdbase + GICR_INVLPIR);
Marc Zyngier2f4f0642019-11-08 16:57:56 +00002966 wait_for_syncr(rdbase);
Marc Zyngier20b3d542016-12-20 15:23:22 +00002967 } else {
2968 its_vpe_send_cmd(vpe, its_send_inv);
2969 }
Marc Zyngierf6a91da2016-12-20 15:20:38 +00002970}
2971
2972static void its_vpe_mask_irq(struct irq_data *d)
2973{
2974 /*
2975	 * We need to mask the LPI, which is described by the parent
2976	 * irq_data. Instead of calling into the parent (which won't
2977	 * exactly do the right thing), let's simply use the
2978 * parent_data pointer. Yes, I'm naughty.
2979 */
2980 lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
2981 its_vpe_send_inv(d);
2982}
2983
2984static void its_vpe_unmask_irq(struct irq_data *d)
2985{
2986 /* Same hack as above... */
2987 lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
2988 its_vpe_send_inv(d);
2989}
2990
Marc Zyngiere57a3e282017-07-31 14:47:24 +01002991static int its_vpe_set_irqchip_state(struct irq_data *d,
2992 enum irqchip_irq_state which,
2993 bool state)
2994{
2995 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2996
2997 if (which != IRQCHIP_STATE_PENDING)
2998 return -EINVAL;
2999
3000 if (gic_rdists->has_direct_lpi) {
3001 void __iomem *rdbase;
3002
3003 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
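		/*
		 * DirectLPI: set or clear the doorbell's pending state
		 * straight at the redistributor via SETLPIR/CLRLPIR.
		 */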
3004 if (state) {
3005 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
3006 } else {
3007 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
Marc Zyngier2f4f0642019-11-08 16:57:56 +00003008 wait_for_syncr(rdbase);
Marc Zyngiere57a3e282017-07-31 14:47:24 +01003009 }
3010 } else {
3011 if (state)
3012 its_vpe_send_cmd(vpe, its_send_int);
3013 else
3014 its_vpe_send_cmd(vpe, its_send_clear);
3015 }
3016
3017 return 0;
3018}
3019
Marc Zyngier8fff27a2016-12-20 13:41:55 +00003020static struct irq_chip its_vpe_irq_chip = {
3021 .name = "GICv4-vpe",
Marc Zyngierf6a91da2016-12-20 15:20:38 +00003022 .irq_mask = its_vpe_mask_irq,
3023 .irq_unmask = its_vpe_unmask_irq,
3024 .irq_eoi = irq_chip_eoi_parent,
Marc Zyngier3171a472016-12-20 15:17:28 +00003025 .irq_set_affinity = its_vpe_set_affinity,
Marc Zyngiere57a3e282017-07-31 14:47:24 +01003026 .irq_set_irqchip_state = its_vpe_set_irqchip_state,
Marc Zyngiere643d802016-12-20 15:09:31 +00003027 .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity,
Marc Zyngier8fff27a2016-12-20 13:41:55 +00003028};
3029
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00003030static int its_vpe_id_alloc(void)
3031{
Shanker Donthineni32bd44d2017-10-07 15:43:48 -05003032 return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00003033}
3034
3035static void its_vpe_id_free(u16 id)
3036{
3037 ida_simple_remove(&its_vpeid_ida, id);
3038}
3039
3040static int its_vpe_init(struct its_vpe *vpe)
3041{
3042 struct page *vpt_page;
3043 int vpe_id;
3044
3045 /* Allocate vpe_id */
3046 vpe_id = its_vpe_id_alloc();
3047 if (vpe_id < 0)
3048 return vpe_id;
3049
3050 /* Allocate VPT */
3051 vpt_page = its_allocate_pending_table(GFP_KERNEL);
3052 if (!vpt_page) {
3053 its_vpe_id_free(vpe_id);
3054 return -ENOMEM;
3055 }
3056
3057 if (!its_alloc_vpe_table(vpe_id)) {
3058 its_vpe_id_free(vpe_id);
Nianyao Tang34f8eb92019-07-26 17:32:57 +08003059 its_free_pending_table(vpt_page);
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00003060 return -ENOMEM;
3061 }
3062
3063 vpe->vpe_id = vpe_id;
3064 vpe->vpt_page = vpt_page;
Marc Zyngier20b3d542016-12-20 15:23:22 +00003065 vpe->vpe_proxy_event = -1;
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00003066
3067 return 0;
3068}
3069
3070static void its_vpe_teardown(struct its_vpe *vpe)
3071{
Marc Zyngier20b3d542016-12-20 15:23:22 +00003072 its_vpe_db_proxy_unmap(vpe);
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00003073 its_vpe_id_free(vpe->vpe_id);
3074 its_free_pending_table(vpe->vpt_page);
3075}
3076
3077static void its_vpe_irq_domain_free(struct irq_domain *domain,
3078 unsigned int virq,
3079 unsigned int nr_irqs)
3080{
3081 struct its_vm *vm = domain->host_data;
3082 int i;
3083
3084 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
3085
3086 for (i = 0; i < nr_irqs; i++) {
3087 struct irq_data *data = irq_domain_get_irq_data(domain,
3088 virq + i);
3089 struct its_vpe *vpe = irq_data_get_irq_chip_data(data);
3090
3091 BUG_ON(vm != vpe->its_vm);
3092
3093 clear_bit(data->hwirq, vm->db_bitmap);
3094 its_vpe_teardown(vpe);
3095 irq_domain_reset_irq_data(data);
3096 }
3097
3098 if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) {
Marc Zyngier38dd7c42018-05-27 17:03:03 +01003099 its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00003100 its_free_prop_table(vm->vprop_page);
3101 }
3102}
3103
3104static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
3105 unsigned int nr_irqs, void *args)
3106{
3107 struct its_vm *vm = args;
3108 unsigned long *bitmap;
3109 struct page *vprop_page;
3110 int base, nr_ids, i, err = 0;
3111
3112 BUG_ON(!vm);
3113
Marc Zyngier38dd7c42018-05-27 17:03:03 +01003114 bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids);
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00003115 if (!bitmap)
3116 return -ENOMEM;
3117
3118 if (nr_ids < nr_irqs) {
Marc Zyngier38dd7c42018-05-27 17:03:03 +01003119 its_lpi_free(bitmap, base, nr_ids);
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00003120 return -ENOMEM;
3121 }
3122
3123 vprop_page = its_allocate_prop_table(GFP_KERNEL);
3124 if (!vprop_page) {
Marc Zyngier38dd7c42018-05-27 17:03:03 +01003125 its_lpi_free(bitmap, base, nr_ids);
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00003126 return -ENOMEM;
3127 }
3128
3129 vm->db_bitmap = bitmap;
3130 vm->db_lpi_base = base;
3131 vm->nr_db_lpis = nr_ids;
3132 vm->vprop_page = vprop_page;
3133
3134 for (i = 0; i < nr_irqs; i++) {
3135 vm->vpes[i]->vpe_db_lpi = base + i;
3136 err = its_vpe_init(vm->vpes[i]);
3137 if (err)
3138 break;
3139 err = its_irq_gic_domain_alloc(domain, virq + i,
3140 vm->vpes[i]->vpe_db_lpi);
3141 if (err)
3142 break;
3143 irq_domain_set_hwirq_and_chip(domain, virq + i, i,
3144 &its_vpe_irq_chip, vm->vpes[i]);
3145 set_bit(i, bitmap);
3146 }
3147
3148	if (err) {
3149		/*
3150		 * Tearing down the i fully set up entries also frees the
3151		 * LPI range and the vprop page once the bitmap is empty.
3152		 */
3153		its_vpe_irq_domain_free(domain, virq, i);
3154	}
3155
3156 return err;
3157}
3158
Thomas Gleixner72491642017-09-13 23:29:10 +02003159static int its_vpe_irq_domain_activate(struct irq_domain *domain,
Thomas Gleixner702cb0a2017-12-29 16:59:06 +01003160 struct irq_data *d, bool reserve)
Marc Zyngiereb781922016-12-20 14:47:05 +00003161{
3162 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
Marc Zyngier40619a22017-10-08 15:16:09 +01003163 struct its_node *its;
Marc Zyngiereb781922016-12-20 14:47:05 +00003164
Marc Zyngier2247e1b2017-10-08 18:50:36 +01003165 /* If we use the list map, we issue VMAPP on demand... */
3166 if (its_list_map)
Marc Zyngier6ef930f2017-11-07 10:04:38 +00003167 return 0;
Marc Zyngiereb781922016-12-20 14:47:05 +00003168
3169 /* Map the VPE to the first possible CPU */
3170 vpe->col_idx = cpumask_first(cpu_online_mask);
Marc Zyngier40619a22017-10-08 15:16:09 +01003171
3172 list_for_each_entry(its, &its_nodes, entry) {
Marc Zyngier0dd57fe2019-11-08 16:57:58 +00003173 if (!is_v4(its))
Marc Zyngier40619a22017-10-08 15:16:09 +01003174 continue;
3175
Marc Zyngier75fd9512017-10-08 18:46:39 +01003176 its_send_vmapp(its, vpe, true);
Marc Zyngier40619a22017-10-08 15:16:09 +01003177 its_send_vinvall(its, vpe);
3178 }
3179
Marc Zyngier44c4c252017-10-19 10:11:34 +01003180 irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
3181
Thomas Gleixner72491642017-09-13 23:29:10 +02003182 return 0;
Marc Zyngiereb781922016-12-20 14:47:05 +00003183}
3184
3185static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
3186 struct irq_data *d)
3187{
3188 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
Marc Zyngier75fd9512017-10-08 18:46:39 +01003189 struct its_node *its;
Marc Zyngiereb781922016-12-20 14:47:05 +00003190
Marc Zyngier2247e1b2017-10-08 18:50:36 +01003191 /*
3192 * If we use the list map, we unmap the VPE once no VLPIs are
3193 * associated with the VM.
3194 */
3195 if (its_list_map)
3196 return;
3197
Marc Zyngier75fd9512017-10-08 18:46:39 +01003198 list_for_each_entry(its, &its_nodes, entry) {
Marc Zyngier0dd57fe2019-11-08 16:57:58 +00003199 if (!is_v4(its))
Marc Zyngier75fd9512017-10-08 18:46:39 +01003200 continue;
3201
3202 its_send_vmapp(its, vpe, false);
3203 }
Marc Zyngiereb781922016-12-20 14:47:05 +00003204}
3205
Marc Zyngier8fff27a2016-12-20 13:41:55 +00003206static const struct irq_domain_ops its_vpe_domain_ops = {
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00003207 .alloc = its_vpe_irq_domain_alloc,
3208 .free = its_vpe_irq_domain_free,
Marc Zyngiereb781922016-12-20 14:47:05 +00003209 .activate = its_vpe_irq_domain_activate,
3210 .deactivate = its_vpe_irq_domain_deactivate,
Marc Zyngier8fff27a2016-12-20 13:41:55 +00003211};
3212
Yun Wu4559fbb2015-03-06 16:37:50 +00003213static int its_force_quiescent(void __iomem *base)
3214{
3215 u32 count = 1000000; /* 1s */
3216 u32 val;
3217
3218 val = readl_relaxed(base + GITS_CTLR);
David Daney7611da82016-08-18 15:41:58 -07003219 /*
3220	 * The GIC architecture specification requires the ITS to be both
3221	 * disabled and quiescent before GITS_BASER<n> or GITS_CBASER are
3222	 * written, otherwise the results are UNPREDICTABLE.
3223 */
3224 if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE))
Yun Wu4559fbb2015-03-06 16:37:50 +00003225 return 0;
3226
3227 /* Disable the generation of all interrupts to this ITS */
Marc Zyngierd51c4b42017-06-27 21:24:25 +01003228 val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe);
Yun Wu4559fbb2015-03-06 16:37:50 +00003229 writel_relaxed(val, base + GITS_CTLR);
3230
3231 /* Poll GITS_CTLR and wait until ITS becomes quiescent */
3232 while (1) {
3233 val = readl_relaxed(base + GITS_CTLR);
3234 if (val & GITS_CTLR_QUIESCENT)
3235 return 0;
3236
3237 count--;
3238 if (!count)
3239 return -EBUSY;
3240
3241 cpu_relax();
3242 udelay(1);
3243 }
3244}
3245
Ard Biesheuvel9d111d42017-10-17 17:55:55 +01003246static bool __maybe_unused its_enable_quirk_cavium_22375(void *data)
Robert Richter94100972015-09-21 22:58:38 +02003247{
3248 struct its_node *its = data;
3249
Marc Zyngier576a8342019-11-08 16:58:00 +00003250 /* erratum 22375: only alloc 8MB table size (20 bits) */
3251 its->typer &= ~GITS_TYPER_DEVBITS;
3252 its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, 20 - 1);
Robert Richter94100972015-09-21 22:58:38 +02003253 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
Ard Biesheuvel9d111d42017-10-17 17:55:55 +01003254
3255 return true;
Robert Richter94100972015-09-21 22:58:38 +02003256}
3257
Ard Biesheuvel9d111d42017-10-17 17:55:55 +01003258static bool __maybe_unused its_enable_quirk_cavium_23144(void *data)
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +02003259{
3260 struct its_node *its = data;
3261
3262 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
Ard Biesheuvel9d111d42017-10-17 17:55:55 +01003263
3264 return true;
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +02003265}
3266
Ard Biesheuvel9d111d42017-10-17 17:55:55 +01003267static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
Shanker Donthineni90922a22017-03-07 08:20:38 -06003268{
3269 struct its_node *its = data;
3270
3271	/* On QDF2400, the size of the ITE is 16 bytes */
Marc Zyngierffedbf02019-11-08 16:57:59 +00003272 its->typer &= ~GITS_TYPER_ITT_ENTRY_SIZE;
3273 its->typer |= FIELD_PREP(GITS_TYPER_ITT_ENTRY_SIZE, 16 - 1);
Ard Biesheuvel9d111d42017-10-17 17:55:55 +01003274
3275 return true;
Shanker Donthineni90922a22017-03-07 08:20:38 -06003276}
3277
Ard Biesheuvel558b0162017-10-17 17:55:56 +01003278static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev)
3279{
3280 struct its_node *its = its_dev->its;
3281
3282 /*
3283 * The Socionext Synquacer SoC has a so-called 'pre-ITS',
3284 * which maps 32-bit writes targeted at a separate window of
3285 * size '4 << device_id_bits' onto writes to GITS_TRANSLATER
3286 * with device ID taken from bits [device_id_bits + 1:2] of
3287 * the window offset.
3288 */
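	/*
	 * Illustration: device ID 5 is signalled by a 32-bit write to
	 * pre_its_base + (5 << 2) = pre_its_base + 0x14.
	 */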
3289 return its->pre_its_base + (its_dev->device_id << 2);
3290}
3291
3292static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data)
3293{
3294 struct its_node *its = data;
3295 u32 pre_its_window[2];
3296 u32 ids;
3297
3298 if (!fwnode_property_read_u32_array(its->fwnode_handle,
3299 "socionext,synquacer-pre-its",
3300 pre_its_window,
3301 ARRAY_SIZE(pre_its_window))) {
3302
3303 its->pre_its_base = pre_its_window[0];
3304 its->get_msi_base = its_irq_get_msi_base_pre_its;
3305
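		/* Window size is '4 << device_id_bits', hence DevBits = log2(size) - 2 */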
3306 ids = ilog2(pre_its_window[1]) - 2;
Marc Zyngier576a8342019-11-08 16:58:00 +00003307 if (device_ids(its) > ids) {
3308 its->typer &= ~GITS_TYPER_DEVBITS;
3309 its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, ids - 1);
3310 }
Ard Biesheuvel558b0162017-10-17 17:55:56 +01003311
3312 /* the pre-ITS breaks isolation, so disable MSI remapping */
3313 its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP;
3314 return true;
3315 }
3316 return false;
3317}
3318
Marc Zyngier5c9a8822017-07-28 21:20:37 +01003319static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data)
3320{
3321 struct its_node *its = data;
3322
3323 /*
3324 * Hip07 insists on using the wrong address for the VLPI
3325 * page. Trick it into doing the right thing...
3326 */
3327 its->vlpi_redist_offset = SZ_128K;
3328 return true;
Marc Zyngiercc2d3212014-11-24 14:35:11 +00003329}
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003330
Robert Richter67510cc2015-09-21 22:58:37 +02003331static const struct gic_quirk its_quirks[] = {
Robert Richter94100972015-09-21 22:58:38 +02003332#ifdef CONFIG_CAVIUM_ERRATUM_22375
3333 {
3334 .desc = "ITS: Cavium errata 22375, 24313",
3335 .iidr = 0xa100034c, /* ThunderX pass 1.x */
3336 .mask = 0xffff0fff,
3337 .init = its_enable_quirk_cavium_22375,
3338 },
3339#endif
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +02003340#ifdef CONFIG_CAVIUM_ERRATUM_23144
3341 {
3342 .desc = "ITS: Cavium erratum 23144",
3343 .iidr = 0xa100034c, /* ThunderX pass 1.x */
3344 .mask = 0xffff0fff,
3345 .init = its_enable_quirk_cavium_23144,
3346 },
3347#endif
Shanker Donthineni90922a22017-03-07 08:20:38 -06003348#ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065
3349 {
3350 .desc = "ITS: QDF2400 erratum 0065",
3351 .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */
3352 .mask = 0xffffffff,
3353 .init = its_enable_quirk_qdf2400_e0065,
3354 },
3355#endif
Ard Biesheuvel558b0162017-10-17 17:55:56 +01003356#ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS
3357 {
3358 /*
3359 * The Socionext Synquacer SoC incorporates ARM's own GIC-500
3360 * implementation, but with a 'pre-ITS' added that requires
3361 * special handling in software.
3362 */
3363 .desc = "ITS: Socionext Synquacer pre-ITS",
3364 .iidr = 0x0001143b,
3365 .mask = 0xffffffff,
3366 .init = its_enable_quirk_socionext_synquacer,
3367 },
3368#endif
Marc Zyngier5c9a8822017-07-28 21:20:37 +01003369#ifdef CONFIG_HISILICON_ERRATUM_161600802
3370 {
3371 .desc = "ITS: Hip07 erratum 161600802",
3372 .iidr = 0x00000004,
3373 .mask = 0xffffffff,
3374 .init = its_enable_quirk_hip07_161600802,
3375 },
3376#endif
Robert Richter67510cc2015-09-21 22:58:37 +02003377 {
3378 }
3379};
3380
3381static void its_enable_quirks(struct its_node *its)
3382{
3383 u32 iidr = readl_relaxed(its->base + GITS_IIDR);
3384
3385 gic_enable_quirks(iidr, its_quirks, its);
3386}
3387
Derek Basehoredba0bc72018-02-28 21:48:18 -08003388static int its_save_disable(void)
3389{
3390 struct its_node *its;
3391 int err = 0;
3392
Sebastian Andrzej Siewiora8db7452018-07-18 17:42:04 +02003393 raw_spin_lock(&its_lock);
Derek Basehoredba0bc72018-02-28 21:48:18 -08003394 list_for_each_entry(its, &its_nodes, entry) {
3395 void __iomem *base;
3396
3397 if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
3398 continue;
3399
3400 base = its->base;
3401 its->ctlr_save = readl_relaxed(base + GITS_CTLR);
3402 err = its_force_quiescent(base);
3403 if (err) {
3404 pr_err("ITS@%pa: failed to quiesce: %d\n",
3405 &its->phys_base, err);
3406 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
3407 goto err;
3408 }
3409
3410 its->cbaser_save = gits_read_cbaser(base + GITS_CBASER);
3411 }
3412
3413err:
3414 if (err) {
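		/* Undo: restore GITS_CTLR on the ITSes already disabled above */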
3415 list_for_each_entry_continue_reverse(its, &its_nodes, entry) {
3416 void __iomem *base;
3417
3418 if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
3419 continue;
3420
3421 base = its->base;
3422 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
3423 }
3424 }
Sebastian Andrzej Siewiora8db7452018-07-18 17:42:04 +02003425 raw_spin_unlock(&its_lock);
Derek Basehoredba0bc72018-02-28 21:48:18 -08003426
3427 return err;
3428}
3429
3430static void its_restore_enable(void)
3431{
3432 struct its_node *its;
3433 int ret;
3434
Sebastian Andrzej Siewiora8db7452018-07-18 17:42:04 +02003435 raw_spin_lock(&its_lock);
Derek Basehoredba0bc72018-02-28 21:48:18 -08003436 list_for_each_entry(its, &its_nodes, entry) {
3437 void __iomem *base;
3438 int i;
3439
3440 if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
3441 continue;
3442
3443 base = its->base;
3444
3445 /*
3446 * Make sure that the ITS is disabled. If it fails to quiesce,
3447 * don't restore it since writing to CBASER or BASER<n>
3448 * registers is undefined according to the GIC v3 ITS
3449 * Specification.
3450 */
3451 ret = its_force_quiescent(base);
3452 if (ret) {
3453 pr_err("ITS@%pa: failed to quiesce on resume: %d\n",
3454 &its->phys_base, ret);
3455 continue;
3456 }
3457
3458 gits_write_cbaser(its->cbaser_save, base + GITS_CBASER);
3459
3460 /*
3461 * Writing CBASER resets CREADR to 0, so make CWRITER and
3462 * cmd_write line up with it.
3463 */
3464 its->cmd_write = its->cmd_base;
3465 gits_write_cwriter(0, base + GITS_CWRITER);
3466
3467 /* Restore GITS_BASER from the value cache. */
3468 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
3469 struct its_baser *baser = &its->tables[i];
3470
3471 if (!(baser->val & GITS_BASER_VALID))
3472 continue;
3473
3474 its_write_baser(its, baser, baser->val);
3475 }
3476 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
Derek Basehore920181c2018-02-28 21:48:20 -08003477
3478 /*
3479 * Reinit the collection if it's stored in the ITS. This is
3480 * indicated by the col_id being less than the HCC field.
3481 * CID < HCC as specified in the GIC v3 Documentation.
3482 */
3483 if (its->collections[smp_processor_id()].col_id <
3484 GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER)))
3485 its_cpu_init_collection(its);
Derek Basehoredba0bc72018-02-28 21:48:18 -08003486 }
Sebastian Andrzej Siewiora8db7452018-07-18 17:42:04 +02003487 raw_spin_unlock(&its_lock);
Derek Basehoredba0bc72018-02-28 21:48:18 -08003488}
3489
3490static struct syscore_ops its_syscore_ops = {
3491 .suspend = its_save_disable,
3492 .resume = its_restore_enable,
3493};
3494
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003495static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
Tomasz Nowickid14ae5e2016-09-12 20:32:23 +02003496{
3497 struct irq_domain *inner_domain;
3498 struct msi_domain_info *info;
3499
3500 info = kzalloc(sizeof(*info), GFP_KERNEL);
3501 if (!info)
3502 return -ENOMEM;
3503
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003504 inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its);
Tomasz Nowickid14ae5e2016-09-12 20:32:23 +02003505 if (!inner_domain) {
3506 kfree(info);
3507 return -ENOMEM;
3508 }
3509
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003510 inner_domain->parent = its_parent;
Marc Zyngier96f0d932017-06-22 11:42:50 +01003511 irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
Ard Biesheuvel558b0162017-10-17 17:55:56 +01003512 inner_domain->flags |= its->msi_domain_flags;
Tomasz Nowickid14ae5e2016-09-12 20:32:23 +02003513 info->ops = &its_msi_domain_ops;
3514 info->data = its;
3515 inner_domain->host_data = info;
3516
3517 return 0;
3518}
3519
Marc Zyngier8fff27a2016-12-20 13:41:55 +00003520static int its_init_vpe_domain(void)
3521{
Marc Zyngier20b3d542016-12-20 15:23:22 +00003522 struct its_node *its;
3523 u32 devid;
3524 int entries;
3525
3526 if (gic_rdists->has_direct_lpi) {
3527 pr_info("ITS: Using DirectLPI for VPE invalidation\n");
3528 return 0;
3529 }
3530
3531 /* Any ITS will do, even if not v4 */
3532 its = list_first_entry(&its_nodes, struct its_node, entry);
3533
3534 entries = roundup_pow_of_two(nr_cpu_ids);
Kees Cook6396bb22018-06-12 14:03:40 -07003535 vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes),
Marc Zyngier20b3d542016-12-20 15:23:22 +00003536 GFP_KERNEL);
3537 if (!vpe_proxy.vpes) {
3538 pr_err("ITS: Can't allocate GICv4 proxy device array\n");
3539 return -ENOMEM;
3540 }
3541
3542 /* Use the last possible DevID */
Marc Zyngier576a8342019-11-08 16:58:00 +00003543 devid = GENMASK(device_ids(its) - 1, 0);
Marc Zyngier20b3d542016-12-20 15:23:22 +00003544 vpe_proxy.dev = its_create_device(its, devid, entries, false);
3545 if (!vpe_proxy.dev) {
3546 kfree(vpe_proxy.vpes);
3547 pr_err("ITS: Can't allocate GICv4 proxy device\n");
3548 return -ENOMEM;
3549 }
3550
Shanker Donthinenic427a472017-09-23 13:50:19 -05003551 BUG_ON(entries > vpe_proxy.dev->nr_ites);
Marc Zyngier20b3d542016-12-20 15:23:22 +00003552
3553 raw_spin_lock_init(&vpe_proxy.lock);
3554 vpe_proxy.next_victim = 0;
3555 pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n",
3556 devid, vpe_proxy.dev->nr_ites);
3557
Marc Zyngier8fff27a2016-12-20 13:41:55 +00003558 return 0;
3559}
3560
Marc Zyngier3dfa5762016-12-19 17:25:54 +00003561static int __init its_compute_its_list_map(struct resource *res,
3562 void __iomem *its_base)
3563{
3564 int its_number;
3565 u32 ctlr;
3566
3567 /*
3568 * This is assumed to be done early enough that we're
3569 * guaranteed to be single-threaded, hence no
3570 * locking. Should this change, we should address
3571 * this.
3572 */
Marc Zyngierab604912017-10-08 18:48:06 +01003573 its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX);
3574 if (its_number >= GICv4_ITS_LIST_MAX) {
Marc Zyngier3dfa5762016-12-19 17:25:54 +00003575 pr_err("ITS@%pa: No ITSList entry available!\n",
3576 &res->start);
3577 return -EINVAL;
3578 }
3579
3580 ctlr = readl_relaxed(its_base + GITS_CTLR);
3581 ctlr &= ~GITS_CTLR_ITS_NUMBER;
3582 ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT;
3583 writel_relaxed(ctlr, its_base + GITS_CTLR);
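	/*
	 * Read the value back: if the ITSList number field didn't take
	 * (it may be fixed by the implementation), adopt whatever the
	 * hardware reports instead of what was requested.
	 */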
3584 ctlr = readl_relaxed(its_base + GITS_CTLR);
3585 if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) {
3586 its_number = ctlr & GITS_CTLR_ITS_NUMBER;
3587 its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT;
3588 }
3589
3590 if (test_and_set_bit(its_number, &its_list_map)) {
3591 pr_err("ITS@%pa: Duplicate ITSList entry %d\n",
3592 &res->start, its_number);
3593 return -EINVAL;
3594 }
3595
3596 return its_number;
3597}
3598
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003599static int __init its_probe_one(struct resource *res,
3600 struct fwnode_handle *handle, int numa_node)
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003601{
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003602 struct its_node *its;
3603 void __iomem *its_base;
Marc Zyngier3dfa5762016-12-19 17:25:54 +00003604 u32 val, ctlr;
3605 u64 baser, tmp, typer;
Shanker Donthineni539d3782019-01-14 09:50:19 +00003606 struct page *page;
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003607 int err;
3608
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003609 its_base = ioremap(res->start, resource_size(res));
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003610 if (!its_base) {
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003611 pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003612 return -ENOMEM;
3613 }
3614
3615 val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
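	/* GITS_PIDR2.ArchRev: 0x30 means GICv3, 0x40 means GICv4 */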
3616 if (val != 0x30 && val != 0x40) {
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003617 pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003618 err = -ENODEV;
3619 goto out_unmap;
3620 }
3621
Yun Wu4559fbb2015-03-06 16:37:50 +00003622 err = its_force_quiescent(its_base);
3623 if (err) {
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003624 pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start);
Yun Wu4559fbb2015-03-06 16:37:50 +00003625 goto out_unmap;
3626 }
3627
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003628 pr_info("ITS %pR\n", res);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003629
3630 its = kzalloc(sizeof(*its), GFP_KERNEL);
3631 if (!its) {
3632 err = -ENOMEM;
3633 goto out_unmap;
3634 }
3635
3636 raw_spin_lock_init(&its->lock);
Marc Zyngier9791ec72019-01-29 10:02:33 +00003637 mutex_init(&its->dev_alloc_lock);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003638 INIT_LIST_HEAD(&its->entry);
3639 INIT_LIST_HEAD(&its->its_device_list);
Marc Zyngier3dfa5762016-12-19 17:25:54 +00003640 typer = gic_read_typer(its_base + GITS_TYPER);
Marc Zyngier0dd57fe2019-11-08 16:57:58 +00003641 its->typer = typer;
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003642 its->base = its_base;
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003643 its->phys_base = res->start;
Marc Zyngier0dd57fe2019-11-08 16:57:58 +00003644 if (is_v4(its)) {
Marc Zyngier3dfa5762016-12-19 17:25:54 +00003645 if (!(typer & GITS_TYPER_VMOVP)) {
3646 err = its_compute_its_list_map(res, its_base);
3647 if (err < 0)
3648 goto out_free_its;
3649
Marc Zyngierdebf6d02017-10-08 18:44:42 +01003650 its->list_nr = err;
3651
Marc Zyngier3dfa5762016-12-19 17:25:54 +00003652 pr_info("ITS@%pa: Using ITS number %d\n",
3653 &res->start, err);
3654 } else {
3655 pr_info("ITS@%pa: Single VMOVP capable\n", &res->start);
3656 }
3657 }
3658
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003659 its->numa_node = numa_node;
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003660
Shanker Donthineni539d3782019-01-14 09:50:19 +00003661 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
3662 get_order(ITS_CMD_QUEUE_SZ));
3663 if (!page) {
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003664 err = -ENOMEM;
3665 goto out_free_its;
3666 }
Shanker Donthineni539d3782019-01-14 09:50:19 +00003667 its->cmd_base = (void *)page_address(page);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003668 its->cmd_write = its->cmd_base;
Ard Biesheuvel558b0162017-10-17 17:55:56 +01003669 its->fwnode_handle = handle;
3670 its->get_msi_base = its_irq_get_msi_base;
3671 its->msi_domain_flags = IRQ_DOMAIN_FLAG_MSI_REMAP;
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003672
Robert Richter67510cc2015-09-21 22:58:37 +02003673 its_enable_quirks(its);
3674
Shanker Donthineni0e0b0f62016-06-06 18:17:31 -05003675 err = its_alloc_tables(its);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003676 if (err)
3677 goto out_free_cmd;
3678
3679 err = its_alloc_collections(its);
3680 if (err)
3681 goto out_free_tables;
3682
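	/*
	 * GITS_CBASER.Size encodes the number of 4K pages of the command
	 * queue minus one, hence the (ITS_CMD_QUEUE_SZ / SZ_4K - 1) below.
	 */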
3683 baser = (virt_to_phys(its->cmd_base) |
Shanker Donthineni2fd632a2017-01-25 21:51:41 -06003684 GITS_CBASER_RaWaWb |
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003685 GITS_CBASER_InnerShareable |
3686 (ITS_CMD_QUEUE_SZ / SZ_4K - 1) |
3687 GITS_CBASER_VALID);
3688
Vladimir Murzin0968a612016-11-02 11:54:06 +00003689 gits_write_cbaser(baser, its->base + GITS_CBASER);
3690 tmp = gits_read_cbaser(its->base + GITS_CBASER);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003691
Marc Zyngier4ad3e362015-03-27 14:15:04 +00003692 if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
Marc Zyngier241a3862015-03-27 14:15:05 +00003693 if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
3694 /*
3695 * The HW reports non-shareable, we must
3696 * remove the cacheability attributes as
3697 * well.
3698 */
3699 baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
3700 GITS_CBASER_CACHEABILITY_MASK);
3701 baser |= GITS_CBASER_nC;
Vladimir Murzin0968a612016-11-02 11:54:06 +00003702 gits_write_cbaser(baser, its->base + GITS_CBASER);
Marc Zyngier241a3862015-03-27 14:15:05 +00003703 }
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003704 pr_info("ITS: using cache flushing for cmd queue\n");
3705 its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
3706 }
3707
Vladimir Murzin0968a612016-11-02 11:54:06 +00003708 gits_write_cwriter(0, its->base + GITS_CWRITER);
Marc Zyngier3dfa5762016-12-19 17:25:54 +00003709 ctlr = readl_relaxed(its->base + GITS_CTLR);
Marc Zyngierd51c4b42017-06-27 21:24:25 +01003710 ctlr |= GITS_CTLR_ENABLE;
Marc Zyngier0dd57fe2019-11-08 16:57:58 +00003711 if (is_v4(its))
Marc Zyngierd51c4b42017-06-27 21:24:25 +01003712 ctlr |= GITS_CTLR_ImDe;
3713 writel_relaxed(ctlr, its->base + GITS_CTLR);
Marc Zyngier241a3862015-03-27 14:15:05 +00003714
Derek Basehoredba0bc72018-02-28 21:48:18 -08003715 if (GITS_TYPER_HCC(typer))
3716 its->flags |= ITS_FLAGS_SAVE_SUSPEND_STATE;
3717
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003718 err = its_init_domain(handle, its);
Tomasz Nowickid14ae5e2016-09-12 20:32:23 +02003719 if (err)
3720 goto out_free_tables;
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003721
Sebastian Andrzej Siewiora8db7452018-07-18 17:42:04 +02003722 raw_spin_lock(&its_lock);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003723 list_add(&its->entry, &its_nodes);
Sebastian Andrzej Siewiora8db7452018-07-18 17:42:04 +02003724 raw_spin_unlock(&its_lock);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003725
3726 return 0;
3727
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003728out_free_tables:
3729 its_free_tables(its);
3730out_free_cmd:
Robert Richter5bc13c22017-02-01 18:38:25 +01003731 free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003732out_free_its:
3733 kfree(its);
3734out_unmap:
3735 iounmap(its_base);
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003736 pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003737 return err;
3738}
3739
3740static bool gic_rdists_supports_plpis(void)
3741{
Marc Zyngier589ce5f2016-10-14 15:13:07 +01003742 return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003743}
3744
Shanker Donthineni6eb486b2018-03-21 20:58:49 -05003745static int redist_disable_lpis(void)
3746{
3747 void __iomem *rbase = gic_data_rdist_rd_base();
3748 u64 timeout = USEC_PER_SEC;
3749 u64 val;
3750
3751 if (!gic_rdists_supports_plpis()) {
3752 pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
3753 return -ENXIO;
3754 }
3755
3756 val = readl_relaxed(rbase + GICR_CTLR);
3757 if (!(val & GICR_CTLR_ENABLE_LPIS))
3758 return 0;
3759
Marc Zyngier11e37d32018-07-27 13:38:54 +01003760 /*
3761 * If coming via a CPU hotplug event, we don't need to disable
3762 * LPIs before trying to re-enable them. They are already
3763 * configured and all is well in the world.
Marc Zyngierc440a9d2018-07-27 15:40:13 +01003764 *
3765 * If running with preallocated tables, there is nothing to do.
Marc Zyngier11e37d32018-07-27 13:38:54 +01003766 */
Marc Zyngierc440a9d2018-07-27 15:40:13 +01003767 if (gic_data_rdist()->lpi_enabled ||
3768 (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED))
Marc Zyngier11e37d32018-07-27 13:38:54 +01003769 return 0;
3770
3771 /*
3772 * From that point on, we only try to do some damage control.
3773 */
3774 pr_warn("GICv3: CPU%d: Booted with LPIs enabled, memory probably corrupted\n",
Shanker Donthineni6eb486b2018-03-21 20:58:49 -05003775 smp_processor_id());
3776 add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
3777
3778 /* Disable LPIs */
3779 val &= ~GICR_CTLR_ENABLE_LPIS;
3780 writel_relaxed(val, rbase + GICR_CTLR);
3781
3782 /* Make sure any change to GICR_CTLR is observable by the GIC */
3783 dsb(sy);
3784
3785 /*
3786 * Software must observe RWP==0 after clearing GICR_CTLR.EnableLPIs
3787 * from 1 to 0 before programming GICR_PEND{PROP}BASER registers.
3788 * Error out if we time out waiting for RWP to clear.
3789 */
3790 while (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_RWP) {
3791 if (!timeout) {
3792 pr_err("CPU%d: Timeout while disabling LPIs\n",
3793 smp_processor_id());
3794 return -ETIMEDOUT;
3795 }
3796 udelay(1);
3797 timeout--;
3798 }
3799
3800 /*
3801 * After it has been written to 1, it is IMPLEMENTATION
3802	 * DEFINED whether GICR_CTLR.EnableLPIs becomes RES1 or can be
3803 * cleared to 0. Error out if clearing the bit failed.
3804 */
3805 if (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS) {
3806 pr_err("CPU%d: Failed to disable LPIs\n", smp_processor_id());
3807 return -EBUSY;
3808 }
3809
3810 return 0;
3811}
3812
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003813int its_cpu_init(void)
3814{
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003815 if (!list_empty(&its_nodes)) {
Shanker Donthineni6eb486b2018-03-21 20:58:49 -05003816 int ret;
3817
3818 ret = redist_disable_lpis();
3819 if (ret)
3820 return ret;
3821
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003822 its_cpu_init_lpis();
Derek Basehore920181c2018-02-28 21:48:20 -08003823 its_cpu_init_collections();
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003824 }
3825
3826 return 0;
3827}
3828
Arvind Yadav935bba72017-06-22 16:05:30 +05303829static const struct of_device_id its_device_id[] = {
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003830 { .compatible = "arm,gic-v3-its", },
3831 {},
3832};
3833
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003834static int __init its_of_probe(struct device_node *node)
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003835{
3836 struct device_node *np;
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003837 struct resource res;
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003838
3839 for (np = of_find_matching_node(node, its_device_id); np;
3840 np = of_find_matching_node(np, its_device_id)) {
Stephen Boyd95a25622018-02-01 09:03:29 -08003841 if (!of_device_is_available(np))
3842 continue;
Tomasz Nowickid14ae5e2016-09-12 20:32:23 +02003843 if (!of_property_read_bool(np, "msi-controller")) {
Rob Herringe81f54c2017-07-18 16:43:10 -05003844 pr_warn("%pOF: no msi-controller property, ITS ignored\n",
3845 np);
Tomasz Nowickid14ae5e2016-09-12 20:32:23 +02003846 continue;
3847 }
3848
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003849 if (of_address_to_resource(np, 0, &res)) {
Rob Herringe81f54c2017-07-18 16:43:10 -05003850 pr_warn("%pOF: no regs?\n", np);
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003851 continue;
3852 }
3853
3854 its_probe_one(&res, &np->fwnode, of_node_to_nid(np));
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003855 }
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003856 return 0;
3857}
3858
Tomasz Nowicki3f010cf2016-09-12 20:32:25 +02003859#ifdef CONFIG_ACPI
3860
3861#define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K)
3862
Robert Richterd1ce2632017-07-12 15:25:09 +02003863#ifdef CONFIG_ACPI_NUMA
Ganapatrao Kulkarnidbd2b822017-06-22 11:40:12 +05303864struct its_srat_map {
3865 /* numa node id */
3866 u32 numa_node;
3867 /* GIC ITS ID */
3868 u32 its_id;
3869};
3870
Hanjun Guofdf6e7a2017-07-26 18:15:49 +08003871static struct its_srat_map *its_srat_maps __initdata;
Ganapatrao Kulkarnidbd2b822017-06-22 11:40:12 +05303872static int its_in_srat __initdata;
3873
3874static int __init acpi_get_its_numa_node(u32 its_id)
3875{
3876 int i;
3877
3878 for (i = 0; i < its_in_srat; i++) {
3879 if (its_id == its_srat_maps[i].its_id)
3880 return its_srat_maps[i].numa_node;
3881 }
3882 return NUMA_NO_NODE;
3883}
3884
Keith Busch60574d12019-03-11 14:55:57 -06003885static int __init gic_acpi_match_srat_its(union acpi_subtable_headers *header,
Hanjun Guofdf6e7a2017-07-26 18:15:49 +08003886 const unsigned long end)
3887{
3888 return 0;
3889}
3890
Keith Busch60574d12019-03-11 14:55:57 -06003891static int __init gic_acpi_parse_srat_its(union acpi_subtable_headers *header,
Ganapatrao Kulkarnidbd2b822017-06-22 11:40:12 +05303892 const unsigned long end)
3893{
3894 int node;
3895 struct acpi_srat_gic_its_affinity *its_affinity;
3896
3897 its_affinity = (struct acpi_srat_gic_its_affinity *)header;
3898 if (!its_affinity)
3899 return -EINVAL;
3900
3901 if (its_affinity->header.length < sizeof(*its_affinity)) {
3902 pr_err("SRAT: Invalid header length %d in ITS affinity\n",
3903 its_affinity->header.length);
3904 return -EINVAL;
3905 }
3906
Ganapatrao Kulkarnidbd2b822017-06-22 11:40:12 +05303907 node = acpi_map_pxm_to_node(its_affinity->proximity_domain);
3908
3909 if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
3910 pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node);
3911 return 0;
3912 }
3913
3914 its_srat_maps[its_in_srat].numa_node = node;
3915 its_srat_maps[its_in_srat].its_id = its_affinity->its_id;
3916 its_in_srat++;
3917 pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n",
3918 its_affinity->proximity_domain, its_affinity->its_id, node);
3919
3920 return 0;
3921}
3922
3923static void __init acpi_table_parse_srat_its(void)
3924{
Hanjun Guofdf6e7a2017-07-26 18:15:49 +08003925 int count;
3926
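	/* First pass only counts the ITS affinity entries to size the map */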
3927 count = acpi_table_parse_entries(ACPI_SIG_SRAT,
3928 sizeof(struct acpi_table_srat),
3929 ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
3930 gic_acpi_match_srat_its, 0);
3931 if (count <= 0)
3932 return;
3933
Kees Cook6da2ec52018-06-12 13:55:00 -07003934 its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map),
3935 GFP_KERNEL);
Hanjun Guofdf6e7a2017-07-26 18:15:49 +08003936 if (!its_srat_maps) {
3937 pr_warn("SRAT: Failed to allocate memory for its_srat_maps!\n");
3938 return;
3939 }
3940
Ganapatrao Kulkarnidbd2b822017-06-22 11:40:12 +05303941 acpi_table_parse_entries(ACPI_SIG_SRAT,
3942 sizeof(struct acpi_table_srat),
3943 ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
3944 gic_acpi_parse_srat_its, 0);
3945}
Hanjun Guofdf6e7a2017-07-26 18:15:49 +08003946
3947/* free the its_srat_maps after ITS probing */
3948static void __init acpi_its_srat_maps_free(void)
3949{
3950 kfree(its_srat_maps);
3951}
Ganapatrao Kulkarnidbd2b822017-06-22 11:40:12 +05303952#else
3953static void __init acpi_table_parse_srat_its(void) { }
3954static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; }
Hanjun Guofdf6e7a2017-07-26 18:15:49 +08003955static void __init acpi_its_srat_maps_free(void) { }
Ganapatrao Kulkarnidbd2b822017-06-22 11:40:12 +05303956#endif
3957
Keith Busch60574d12019-03-11 14:55:57 -06003958static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header,
Tomasz Nowicki3f010cf2016-09-12 20:32:25 +02003959 const unsigned long end)
3960{
3961 struct acpi_madt_generic_translator *its_entry;
3962 struct fwnode_handle *dom_handle;
3963 struct resource res;
3964 int err;
3965
3966 its_entry = (struct acpi_madt_generic_translator *)header;
3967 memset(&res, 0, sizeof(res));
3968 res.start = its_entry->base_address;
3969 res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1;
3970 res.flags = IORESOURCE_MEM;
3971
Marc Zyngier5778cc72019-07-31 16:13:42 +01003972 dom_handle = irq_domain_alloc_fwnode(&res.start);
Tomasz Nowicki3f010cf2016-09-12 20:32:25 +02003973 if (!dom_handle) {
3974 pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n",
3975 &res.start);
3976 return -ENOMEM;
3977 }
3978
Shameer Kolothum8b4282e2018-02-13 15:20:50 +00003979 err = iort_register_domain_token(its_entry->translation_id, res.start,
3980 dom_handle);
Tomasz Nowicki3f010cf2016-09-12 20:32:25 +02003981 if (err) {
3982 pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n",
3983 &res.start, its_entry->translation_id);
3984 goto dom_err;
3985 }
3986
Ganapatrao Kulkarnidbd2b822017-06-22 11:40:12 +05303987 err = its_probe_one(&res, dom_handle,
3988 acpi_get_its_numa_node(its_entry->translation_id));
Tomasz Nowicki3f010cf2016-09-12 20:32:25 +02003989 if (!err)
3990 return 0;
3991
3992 iort_deregister_domain_token(its_entry->translation_id);
3993dom_err:
3994 irq_domain_free_fwnode(dom_handle);
3995 return err;
3996}
3997
3998static void __init its_acpi_probe(void)
3999{
Ganapatrao Kulkarnidbd2b822017-06-22 11:40:12 +05304000 acpi_table_parse_srat_its();
Tomasz Nowicki3f010cf2016-09-12 20:32:25 +02004001 acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
4002 gic_acpi_parse_madt_its, 0);
Hanjun Guofdf6e7a2017-07-26 18:15:49 +08004003 acpi_its_srat_maps_free();
Tomasz Nowicki3f010cf2016-09-12 20:32:25 +02004004}
4005#else
4006static void __init its_acpi_probe(void) { }
4007#endif
4008
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02004009int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
4010 struct irq_domain *parent_domain)
4011{
4012 struct device_node *of_node;
Marc Zyngier8fff27a2016-12-20 13:41:55 +00004013 struct its_node *its;
4014 bool has_v4 = false;
4015 int err;
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02004016
4017 its_parent = parent_domain;
4018 of_node = to_of_node(handle);
4019 if (of_node)
4020 its_of_probe(of_node);
4021 else
Tomasz Nowicki3f010cf2016-09-12 20:32:25 +02004022 its_acpi_probe();
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00004023
4024 if (list_empty(&its_nodes)) {
4025 pr_warn("ITS: No ITS available, not enabling LPIs\n");
4026 return -ENXIO;
4027 }
4028
4029 gic_rdists = rdists;
Marc Zyngier11e37d32018-07-27 13:38:54 +01004030
4031 err = allocate_lpi_tables();
Marc Zyngier8fff27a2016-12-20 13:41:55 +00004032 if (err)
4033 return err;
4034
4035 list_for_each_entry(its, &its_nodes, entry)
Marc Zyngier0dd57fe2019-11-08 16:57:58 +00004036 has_v4 |= is_v4(its);
Marc Zyngier8fff27a2016-12-20 13:41:55 +00004037
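	/*
	 * GICv4 direct VLPI injection needs both a v4-capable ITS and
	 * VLPI-capable redistributors; otherwise stick to plain LPIs.
	 */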
4038	if (has_v4 && rdists->has_vlpis) {
Marc Zyngier3d63cb52016-12-20 15:31:54 +00004039 if (its_init_vpe_domain() ||
4040 its_init_v4(parent_domain, &its_vpe_domain_ops)) {
Marc Zyngier8fff27a2016-12-20 13:41:55 +00004041 rdists->has_vlpis = false;
4042 pr_err("ITS: Disabling GICv4 support\n");
4043 }
4044 }
4045
Derek Basehoredba0bc72018-02-28 21:48:18 -08004046 register_syscore_ops(&its_syscore_ops);
4047
Marc Zyngier8fff27a2016-12-20 13:41:55 +00004048 return 0;
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00004049}