Marc Zyngiercc2d3212014-11-24 14:35:11 +00001/*
Marc Zyngierd7276b82016-12-20 15:11:47 +00002 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
Marc Zyngiercc2d3212014-11-24 14:35:11 +00003 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
Tomasz Nowicki3f010cf2016-09-12 20:32:25 +020018#include <linux/acpi.h>
Hanjun Guo8d3554b2017-03-07 20:39:59 +080019#include <linux/acpi_iort.h>
Marc Zyngiercc2d3212014-11-24 14:35:11 +000020#include <linux/bitmap.h>
21#include <linux/cpu.h>
22#include <linux/delay.h>
Robin Murphy44bb7e22016-09-12 17:13:59 +010023#include <linux/dma-iommu.h>
Marc Zyngiercc2d3212014-11-24 14:35:11 +000024#include <linux/interrupt.h>
Tomasz Nowicki3f010cf2016-09-12 20:32:25 +020025#include <linux/irqdomain.h>
Marc Zyngier880cb3c2018-05-27 16:14:15 +010026#include <linux/list.h>
27#include <linux/list_sort.h>
Marc Zyngiercc2d3212014-11-24 14:35:11 +000028#include <linux/log2.h>
29#include <linux/mm.h>
30#include <linux/msi.h>
31#include <linux/of.h>
32#include <linux/of_address.h>
33#include <linux/of_irq.h>
34#include <linux/of_pci.h>
35#include <linux/of_platform.h>
36#include <linux/percpu.h>
37#include <linux/slab.h>
Derek Basehoredba0bc72018-02-28 21:48:18 -080038#include <linux/syscore_ops.h>
Marc Zyngiercc2d3212014-11-24 14:35:11 +000039
Joel Porquet41a83e062015-07-07 17:11:46 -040040#include <linux/irqchip.h>
Marc Zyngiercc2d3212014-11-24 14:35:11 +000041#include <linux/irqchip/arm-gic-v3.h>
Marc Zyngierc808eea2016-12-20 09:31:20 +000042#include <linux/irqchip/arm-gic-v4.h>
Marc Zyngiercc2d3212014-11-24 14:35:11 +000043
Marc Zyngiercc2d3212014-11-24 14:35:11 +000044#include <asm/cputype.h>
45#include <asm/exception.h>
46
Robert Richter67510cc2015-09-21 22:58:37 +020047#include "irq-gic-common.h"
48
Robert Richter94100972015-09-21 22:58:38 +020049#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0)
50#define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1)
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +020051#define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2)
Derek Basehoredba0bc72018-02-28 21:48:18 -080052#define ITS_FLAGS_SAVE_SUSPEND_STATE (1ULL << 3)
Marc Zyngiercc2d3212014-11-24 14:35:11 +000053
Marc Zyngierc48ed512014-11-24 14:35:12 +000054#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0)
Marc Zyngierc440a9d2018-07-27 15:40:13 +010055#define RDIST_FLAGS_RD_TABLES_PREALLOCATED (1 << 1)
Marc Zyngierc48ed512014-11-24 14:35:12 +000056
Marc Zyngiera13b0402016-12-19 17:15:24 +000057static u32 lpi_id_bits;
58
59/*
60 * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
61 * deal with (one configuration byte per interrupt). PENDBASE has to
62 * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
63 */
64#define LPI_NRBITS lpi_id_bits
65#define LPI_PROPBASE_SZ ALIGN(BIT(LPI_NRBITS), SZ_64K)
66#define LPI_PENDBASE_SZ ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
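/*
 * Editorial sizing example (not in the original source): with lpi_id_bits == 16,
 * LPI_PROPBASE_SZ works out to 64kB (one configuration byte per LPI), and
 * LPI_PENDBASE_SZ rounds the 8kB of pending bits up to the required 64kB.
 */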
67
68#define LPI_PROP_DEFAULT_PRIO 0xa0
69
Marc Zyngiercc2d3212014-11-24 14:35:11 +000070/*
71 * Collection structure - just an ID, and a redistributor address to
72 * ping. We use one per CPU as a bag of interrupts assigned to this
73 * CPU.
74 */
75struct its_collection {
76 u64 target_address;
77 u16 col_id;
78};
79
80/*
Shanker Donthineni93473592016-06-06 18:17:30 -050081 * The ITS_BASER structure - contains memory information, cached
82 * value of BASER register configuration and ITS page size.
Shanker Donthineni466b7d12016-03-09 22:10:49 -060083 */
84struct its_baser {
85 void *base;
86 u64 val;
87 u32 order;
Shanker Donthineni93473592016-06-06 18:17:30 -050088 u32 psz;
Shanker Donthineni466b7d12016-03-09 22:10:49 -060089};
90
Ard Biesheuvel558b0162017-10-17 17:55:56 +010091struct its_device;
92
Shanker Donthineni466b7d12016-03-09 22:10:49 -060093/*
Marc Zyngiercc2d3212014-11-24 14:35:11 +000094 * The ITS structure - contains most of the infrastructure, with the
Marc Zyngier841514a2015-07-28 14:46:20 +010095 * top-level MSI domain, the command queue, the collections, and the
96 * list of devices writing to it.
Marc Zyngiercc2d3212014-11-24 14:35:11 +000097 */
98struct its_node {
99 raw_spinlock_t lock;
100 struct list_head entry;
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000101 void __iomem *base;
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +0200102 phys_addr_t phys_base;
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000103 struct its_cmd_block *cmd_base;
104 struct its_cmd_block *cmd_write;
Shanker Donthineni466b7d12016-03-09 22:10:49 -0600105 struct its_baser tables[GITS_BASER_NR_REGS];
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000106 struct its_collection *collections;
Ard Biesheuvel558b0162017-10-17 17:55:56 +0100107 struct fwnode_handle *fwnode_handle;
108 u64 (*get_msi_base)(struct its_device *its_dev);
Derek Basehoredba0bc72018-02-28 21:48:18 -0800109 u64 cbaser_save;
110 u32 ctlr_save;
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000111 struct list_head its_device_list;
112 u64 flags;
Marc Zyngierdebf6d02017-10-08 18:44:42 +0100113 unsigned long list_nr;
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000114 u32 ite_size;
Shanker Donthineni466b7d12016-03-09 22:10:49 -0600115 u32 device_ids;
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +0200116 int numa_node;
Ard Biesheuvel558b0162017-10-17 17:55:56 +0100117 unsigned int msi_domain_flags;
118 u32 pre_its_base; /* for Socionext Synquacer */
Marc Zyngier3dfa5762016-12-19 17:25:54 +0000119 bool is_v4;
Marc Zyngier5c9a8822017-07-28 21:20:37 +0100120 int vlpi_redist_offset;
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000121};
122
123#define ITS_ITT_ALIGN SZ_256
124
Shanker Donthineni32bd44d2017-10-07 15:43:48 -0500125/* The maximum number of VPEID bits supported by VLPI commands */
126#define ITS_MAX_VPEID_BITS (16)
127#define ITS_MAX_VPEID (1 << (ITS_MAX_VPEID_BITS))
128
Shanker Donthineni2eca0d62016-02-16 18:00:36 -0600129/* Convert page order to size in bytes */
130#define PAGE_ORDER_TO_SIZE(o) (PAGE_SIZE << (o))
131
Marc Zyngier591e5be2015-07-17 10:46:42 +0100132struct event_lpi_map {
133 unsigned long *lpi_map;
134 u16 *col_map;
135 irq_hw_number_t lpi_base;
136 int nr_lpis;
Marc Zyngierd011e4e2016-12-20 09:44:41 +0000137 struct mutex vlpi_lock;
138 struct its_vm *vm;
139 struct its_vlpi_map *vlpi_maps;
140 int nr_vlpis;
Marc Zyngier591e5be2015-07-17 10:46:42 +0100141};
142
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000143/*
Marc Zyngierd011e4e2016-12-20 09:44:41 +0000144 * The ITS view of a device - belongs to an ITS, owns an interrupt
 145 * translation table, and a list of interrupts. If some of its
146 * LPIs are injected into a guest (GICv4), the event_map.vm field
147 * indicates which one.
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000148 */
149struct its_device {
150 struct list_head entry;
151 struct its_node *its;
Marc Zyngier591e5be2015-07-17 10:46:42 +0100152 struct event_lpi_map event_map;
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000153 void *itt;
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000154 u32 nr_ites;
155 u32 device_id;
156};
157
Marc Zyngier20b3d542016-12-20 15:23:22 +0000158static struct {
159 raw_spinlock_t lock;
160 struct its_device *dev;
161 struct its_vpe **vpes;
162 int next_victim;
163} vpe_proxy;
164
Marc Zyngier1ac19ca2014-11-24 14:35:14 +0000165static LIST_HEAD(its_nodes);
Sebastian Andrzej Siewiora8db7452018-07-18 17:42:04 +0200166static DEFINE_RAW_SPINLOCK(its_lock);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +0000167static struct rdists *gic_rdists;
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +0200168static struct irq_domain *its_parent;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +0000169
Marc Zyngier3dfa5762016-12-19 17:25:54 +0000170static unsigned long its_list_map;
Marc Zyngier3171a472016-12-20 15:17:28 +0000171static u16 vmovp_seq_num;
172static DEFINE_RAW_SPINLOCK(vmovp_lock);
173
Marc Zyngier7d75bbb2016-12-20 13:55:54 +0000174static DEFINE_IDA(its_vpeid_ida);
Marc Zyngier3dfa5762016-12-19 17:25:54 +0000175
Marc Zyngier1ac19ca2014-11-24 14:35:14 +0000176#define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist))
Marc Zyngier11e37d32018-07-27 13:38:54 +0100177#define gic_data_rdist_cpu(cpu) (per_cpu_ptr(gic_rdists->rdist, cpu))
Marc Zyngier1ac19ca2014-11-24 14:35:14 +0000178#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
Marc Zyngiere643d802016-12-20 15:09:31 +0000179#define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K)
Marc Zyngier1ac19ca2014-11-24 14:35:14 +0000180
Marc Zyngier591e5be2015-07-17 10:46:42 +0100181static struct its_collection *dev_event_to_col(struct its_device *its_dev,
182 u32 event)
183{
184 struct its_node *its = its_dev->its;
185
186 return its->collections + its_dev->event_map.col_map[event];
187}
188
Marc Zyngier83559b42018-06-22 10:52:52 +0100189static struct its_collection *valid_col(struct its_collection *col)
190{
 191 if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
192 return NULL;
193
194 return col;
195}
196
Marc Zyngier205e0652018-06-22 10:52:53 +0100197static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
198{
199 if (valid_col(its->collections + vpe->col_idx))
200 return vpe;
201
202 return NULL;
203}
204
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000205/*
206 * ITS command descriptors - parameters to be encoded in a command
207 * block.
208 */
209struct its_cmd_desc {
210 union {
211 struct {
212 struct its_device *dev;
213 u32 event_id;
214 } its_inv_cmd;
215
216 struct {
217 struct its_device *dev;
218 u32 event_id;
Marc Zyngier8d85dce2016-12-19 18:02:13 +0000219 } its_clear_cmd;
220
221 struct {
222 struct its_device *dev;
223 u32 event_id;
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000224 } its_int_cmd;
225
226 struct {
227 struct its_device *dev;
228 int valid;
229 } its_mapd_cmd;
230
231 struct {
232 struct its_collection *col;
233 int valid;
234 } its_mapc_cmd;
235
236 struct {
237 struct its_device *dev;
238 u32 phys_id;
239 u32 event_id;
Marc Zyngier6a25ad32016-12-20 15:52:26 +0000240 } its_mapti_cmd;
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000241
242 struct {
243 struct its_device *dev;
244 struct its_collection *col;
Marc Zyngier591e5be2015-07-17 10:46:42 +0100245 u32 event_id;
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000246 } its_movi_cmd;
247
248 struct {
249 struct its_device *dev;
250 u32 event_id;
251 } its_discard_cmd;
252
253 struct {
254 struct its_collection *col;
255 } its_invall_cmd;
Marc Zyngierd011e4e2016-12-20 09:44:41 +0000256
257 struct {
258 struct its_vpe *vpe;
Marc Zyngiereb781922016-12-20 14:47:05 +0000259 } its_vinvall_cmd;
260
261 struct {
262 struct its_vpe *vpe;
263 struct its_collection *col;
264 bool valid;
265 } its_vmapp_cmd;
266
267 struct {
268 struct its_vpe *vpe;
Marc Zyngierd011e4e2016-12-20 09:44:41 +0000269 struct its_device *dev;
270 u32 virt_id;
271 u32 event_id;
272 bool db_enabled;
273 } its_vmapti_cmd;
274
275 struct {
276 struct its_vpe *vpe;
277 struct its_device *dev;
278 u32 event_id;
279 bool db_enabled;
280 } its_vmovi_cmd;
Marc Zyngier3171a472016-12-20 15:17:28 +0000281
282 struct {
283 struct its_vpe *vpe;
284 struct its_collection *col;
285 u16 seq_num;
286 u16 its_list;
287 } its_vmovp_cmd;
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000288 };
289};
290
291/*
292 * The ITS command block, which is what the ITS actually parses.
293 */
294struct its_cmd_block {
295 u64 raw_cmd[4];
296};
297
298#define ITS_CMD_QUEUE_SZ SZ_64K
299#define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))
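/*
 * Editorial note: each command block is four 64-bit doublewords (32 bytes), so
 * the 64kB command queue above holds 2048 entries.
 */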
300
Marc Zyngier67047f902017-07-28 21:16:58 +0100301typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *,
302 struct its_cmd_block *,
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000303 struct its_cmd_desc *);
304
Marc Zyngier67047f902017-07-28 21:16:58 +0100305typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *,
306 struct its_cmd_block *,
Marc Zyngierd011e4e2016-12-20 09:44:41 +0000307 struct its_cmd_desc *);
308
Marc Zyngier4d36f132016-12-19 17:11:52 +0000309static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
310{
311 u64 mask = GENMASK_ULL(h, l);
312 *raw_cmd &= ~mask;
313 *raw_cmd |= (val << l) & mask;
314}
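/*
 * Editorial note: the its_encode_*() helpers below all use its_mask_encode()
 * to place a field into bits [h:l] of one command doubleword; for example, the
 * device ID lands in bits [63:32] of raw_cmd[0].
 */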
315
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000316static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
317{
Marc Zyngier4d36f132016-12-19 17:11:52 +0000318 its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000319}
320
321static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
322{
Marc Zyngier4d36f132016-12-19 17:11:52 +0000323 its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000324}
325
326static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
327{
Marc Zyngier4d36f132016-12-19 17:11:52 +0000328 its_mask_encode(&cmd->raw_cmd[1], id, 31, 0);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000329}
330
331static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
332{
Marc Zyngier4d36f132016-12-19 17:11:52 +0000333 its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000334}
335
336static void its_encode_size(struct its_cmd_block *cmd, u8 size)
337{
Marc Zyngier4d36f132016-12-19 17:11:52 +0000338 its_mask_encode(&cmd->raw_cmd[1], size, 4, 0);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000339}
340
341static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
342{
Shanker Donthineni30ae9612017-10-09 11:46:55 -0500343 its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000344}
345
346static void its_encode_valid(struct its_cmd_block *cmd, int valid)
347{
Marc Zyngier4d36f132016-12-19 17:11:52 +0000348 its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000349}
350
351static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
352{
Shanker Donthineni30ae9612017-10-09 11:46:55 -0500353 its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000354}
355
356static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
357{
Marc Zyngier4d36f132016-12-19 17:11:52 +0000358 its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000359}
360
Marc Zyngierd011e4e2016-12-20 09:44:41 +0000361static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
362{
363 its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
364}
365
366static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
367{
368 its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
369}
370
371static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
372{
373 its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
374}
375
376static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
377{
378 its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
379}
380
Marc Zyngier3171a472016-12-20 15:17:28 +0000381static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num)
382{
383 its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32);
384}
385
386static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
387{
388 its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0);
389}
390
Marc Zyngiereb781922016-12-20 14:47:05 +0000391static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
392{
Shanker Donthineni30ae9612017-10-09 11:46:55 -0500393 its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16);
Marc Zyngiereb781922016-12-20 14:47:05 +0000394}
395
396static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
397{
398 its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
399}
400
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000401static inline void its_fixup_cmd(struct its_cmd_block *cmd)
402{
403 /* Let's fixup BE commands */
404 cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]);
405 cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]);
406 cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]);
407 cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]);
408}
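/*
 * Editorial note: the ITS consumes commands in little-endian format, so the
 * fixup above byte-swaps each doubleword on big-endian kernels before the
 * command is handed to the hardware.
 */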
409
Marc Zyngier67047f902017-07-28 21:16:58 +0100410static struct its_collection *its_build_mapd_cmd(struct its_node *its,
411 struct its_cmd_block *cmd,
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000412 struct its_cmd_desc *desc)
413{
414 unsigned long itt_addr;
Marc Zyngierc8481262014-12-12 10:51:24 +0000415 u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000416
417 itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
418 itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);
419
420 its_encode_cmd(cmd, GITS_CMD_MAPD);
421 its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
422 its_encode_size(cmd, size - 1);
423 its_encode_itt(cmd, itt_addr);
424 its_encode_valid(cmd, desc->its_mapd_cmd.valid);
425
426 its_fixup_cmd(cmd);
427
Marc Zyngier591e5be2015-07-17 10:46:42 +0100428 return NULL;
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000429}
430
Marc Zyngier67047f902017-07-28 21:16:58 +0100431static struct its_collection *its_build_mapc_cmd(struct its_node *its,
432 struct its_cmd_block *cmd,
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000433 struct its_cmd_desc *desc)
434{
435 its_encode_cmd(cmd, GITS_CMD_MAPC);
436 its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
437 its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
438 its_encode_valid(cmd, desc->its_mapc_cmd.valid);
439
440 its_fixup_cmd(cmd);
441
442 return desc->its_mapc_cmd.col;
443}
444
Marc Zyngier67047f902017-07-28 21:16:58 +0100445static struct its_collection *its_build_mapti_cmd(struct its_node *its,
446 struct its_cmd_block *cmd,
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000447 struct its_cmd_desc *desc)
448{
Marc Zyngier591e5be2015-07-17 10:46:42 +0100449 struct its_collection *col;
450
Marc Zyngier6a25ad32016-12-20 15:52:26 +0000451 col = dev_event_to_col(desc->its_mapti_cmd.dev,
452 desc->its_mapti_cmd.event_id);
Marc Zyngier591e5be2015-07-17 10:46:42 +0100453
Marc Zyngier6a25ad32016-12-20 15:52:26 +0000454 its_encode_cmd(cmd, GITS_CMD_MAPTI);
455 its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id);
456 its_encode_event_id(cmd, desc->its_mapti_cmd.event_id);
457 its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id);
Marc Zyngier591e5be2015-07-17 10:46:42 +0100458 its_encode_collection(cmd, col->col_id);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000459
460 its_fixup_cmd(cmd);
461
Marc Zyngier83559b42018-06-22 10:52:52 +0100462 return valid_col(col);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000463}
464
Marc Zyngier67047f902017-07-28 21:16:58 +0100465static struct its_collection *its_build_movi_cmd(struct its_node *its,
466 struct its_cmd_block *cmd,
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000467 struct its_cmd_desc *desc)
468{
Marc Zyngier591e5be2015-07-17 10:46:42 +0100469 struct its_collection *col;
470
471 col = dev_event_to_col(desc->its_movi_cmd.dev,
472 desc->its_movi_cmd.event_id);
473
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000474 its_encode_cmd(cmd, GITS_CMD_MOVI);
475 its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
Marc Zyngier591e5be2015-07-17 10:46:42 +0100476 its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000477 its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);
478
479 its_fixup_cmd(cmd);
480
Marc Zyngier83559b42018-06-22 10:52:52 +0100481 return valid_col(col);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000482}
483
Marc Zyngier67047f902017-07-28 21:16:58 +0100484static struct its_collection *its_build_discard_cmd(struct its_node *its,
485 struct its_cmd_block *cmd,
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000486 struct its_cmd_desc *desc)
487{
Marc Zyngier591e5be2015-07-17 10:46:42 +0100488 struct its_collection *col;
489
490 col = dev_event_to_col(desc->its_discard_cmd.dev,
491 desc->its_discard_cmd.event_id);
492
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000493 its_encode_cmd(cmd, GITS_CMD_DISCARD);
494 its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
495 its_encode_event_id(cmd, desc->its_discard_cmd.event_id);
496
497 its_fixup_cmd(cmd);
498
Marc Zyngier83559b42018-06-22 10:52:52 +0100499 return valid_col(col);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000500}
501
Marc Zyngier67047f902017-07-28 21:16:58 +0100502static struct its_collection *its_build_inv_cmd(struct its_node *its,
503 struct its_cmd_block *cmd,
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000504 struct its_cmd_desc *desc)
505{
Marc Zyngier591e5be2015-07-17 10:46:42 +0100506 struct its_collection *col;
507
508 col = dev_event_to_col(desc->its_inv_cmd.dev,
509 desc->its_inv_cmd.event_id);
510
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000511 its_encode_cmd(cmd, GITS_CMD_INV);
512 its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
513 its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
514
515 its_fixup_cmd(cmd);
516
Marc Zyngier83559b42018-06-22 10:52:52 +0100517 return valid_col(col);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000518}
519
Marc Zyngier67047f902017-07-28 21:16:58 +0100520static struct its_collection *its_build_int_cmd(struct its_node *its,
521 struct its_cmd_block *cmd,
Marc Zyngier8d85dce2016-12-19 18:02:13 +0000522 struct its_cmd_desc *desc)
523{
524 struct its_collection *col;
525
526 col = dev_event_to_col(desc->its_int_cmd.dev,
527 desc->its_int_cmd.event_id);
528
529 its_encode_cmd(cmd, GITS_CMD_INT);
530 its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
531 its_encode_event_id(cmd, desc->its_int_cmd.event_id);
532
533 its_fixup_cmd(cmd);
534
Marc Zyngier83559b42018-06-22 10:52:52 +0100535 return valid_col(col);
Marc Zyngier8d85dce2016-12-19 18:02:13 +0000536}
537
Marc Zyngier67047f902017-07-28 21:16:58 +0100538static struct its_collection *its_build_clear_cmd(struct its_node *its,
539 struct its_cmd_block *cmd,
Marc Zyngier8d85dce2016-12-19 18:02:13 +0000540 struct its_cmd_desc *desc)
541{
542 struct its_collection *col;
543
544 col = dev_event_to_col(desc->its_clear_cmd.dev,
545 desc->its_clear_cmd.event_id);
546
547 its_encode_cmd(cmd, GITS_CMD_CLEAR);
548 its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
549 its_encode_event_id(cmd, desc->its_clear_cmd.event_id);
550
551 its_fixup_cmd(cmd);
552
Marc Zyngier83559b42018-06-22 10:52:52 +0100553 return valid_col(col);
Marc Zyngier8d85dce2016-12-19 18:02:13 +0000554}
555
Marc Zyngier67047f902017-07-28 21:16:58 +0100556static struct its_collection *its_build_invall_cmd(struct its_node *its,
557 struct its_cmd_block *cmd,
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000558 struct its_cmd_desc *desc)
559{
560 its_encode_cmd(cmd, GITS_CMD_INVALL);
561 its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
562
563 its_fixup_cmd(cmd);
564
565 return NULL;
566}
567
Marc Zyngier67047f902017-07-28 21:16:58 +0100568static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
569 struct its_cmd_block *cmd,
Marc Zyngiereb781922016-12-20 14:47:05 +0000570 struct its_cmd_desc *desc)
571{
572 its_encode_cmd(cmd, GITS_CMD_VINVALL);
573 its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);
574
575 its_fixup_cmd(cmd);
576
Marc Zyngier205e0652018-06-22 10:52:53 +0100577 return valid_vpe(its, desc->its_vinvall_cmd.vpe);
Marc Zyngiereb781922016-12-20 14:47:05 +0000578}
579
Marc Zyngier67047f902017-07-28 21:16:58 +0100580static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
581 struct its_cmd_block *cmd,
Marc Zyngiereb781922016-12-20 14:47:05 +0000582 struct its_cmd_desc *desc)
583{
584 unsigned long vpt_addr;
Marc Zyngier5c9a8822017-07-28 21:20:37 +0100585 u64 target;
Marc Zyngiereb781922016-12-20 14:47:05 +0000586
587 vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
Marc Zyngier5c9a8822017-07-28 21:20:37 +0100588 target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;
Marc Zyngiereb781922016-12-20 14:47:05 +0000589
590 its_encode_cmd(cmd, GITS_CMD_VMAPP);
591 its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
592 its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
Marc Zyngier5c9a8822017-07-28 21:20:37 +0100593 its_encode_target(cmd, target);
Marc Zyngiereb781922016-12-20 14:47:05 +0000594 its_encode_vpt_addr(cmd, vpt_addr);
595 its_encode_vpt_size(cmd, LPI_NRBITS - 1);
596
597 its_fixup_cmd(cmd);
598
Marc Zyngier205e0652018-06-22 10:52:53 +0100599 return valid_vpe(its, desc->its_vmapp_cmd.vpe);
Marc Zyngiereb781922016-12-20 14:47:05 +0000600}
601
Marc Zyngier67047f902017-07-28 21:16:58 +0100602static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
603 struct its_cmd_block *cmd,
Marc Zyngierd011e4e2016-12-20 09:44:41 +0000604 struct its_cmd_desc *desc)
605{
606 u32 db;
607
608 if (desc->its_vmapti_cmd.db_enabled)
609 db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
610 else
611 db = 1023;
612
613 its_encode_cmd(cmd, GITS_CMD_VMAPTI);
614 its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
615 its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
616 its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
617 its_encode_db_phys_id(cmd, db);
618 its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);
619
620 its_fixup_cmd(cmd);
621
Marc Zyngier205e0652018-06-22 10:52:53 +0100622 return valid_vpe(its, desc->its_vmapti_cmd.vpe);
Marc Zyngierd011e4e2016-12-20 09:44:41 +0000623}
624
Marc Zyngier67047f902017-07-28 21:16:58 +0100625static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
626 struct its_cmd_block *cmd,
Marc Zyngierd011e4e2016-12-20 09:44:41 +0000627 struct its_cmd_desc *desc)
628{
629 u32 db;
630
631 if (desc->its_vmovi_cmd.db_enabled)
632 db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
633 else
634 db = 1023;
635
636 its_encode_cmd(cmd, GITS_CMD_VMOVI);
637 its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
638 its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
639 its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
640 its_encode_db_phys_id(cmd, db);
641 its_encode_db_valid(cmd, true);
642
643 its_fixup_cmd(cmd);
644
Marc Zyngier205e0652018-06-22 10:52:53 +0100645 return valid_vpe(its, desc->its_vmovi_cmd.vpe);
Marc Zyngierd011e4e2016-12-20 09:44:41 +0000646}
647
Marc Zyngier67047f902017-07-28 21:16:58 +0100648static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
649 struct its_cmd_block *cmd,
Marc Zyngier3171a472016-12-20 15:17:28 +0000650 struct its_cmd_desc *desc)
651{
Marc Zyngier5c9a8822017-07-28 21:20:37 +0100652 u64 target;
653
654 target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset;
Marc Zyngier3171a472016-12-20 15:17:28 +0000655 its_encode_cmd(cmd, GITS_CMD_VMOVP);
656 its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
657 its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
658 its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
Marc Zyngier5c9a8822017-07-28 21:20:37 +0100659 its_encode_target(cmd, target);
Marc Zyngier3171a472016-12-20 15:17:28 +0000660
661 its_fixup_cmd(cmd);
662
Marc Zyngier205e0652018-06-22 10:52:53 +0100663 return valid_vpe(its, desc->its_vmovp_cmd.vpe);
Marc Zyngier3171a472016-12-20 15:17:28 +0000664}
665
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000666static u64 its_cmd_ptr_to_offset(struct its_node *its,
667 struct its_cmd_block *ptr)
668{
669 return (ptr - its->cmd_base) * sizeof(*ptr);
670}
671
672static int its_queue_full(struct its_node *its)
673{
674 int widx;
675 int ridx;
676
677 widx = its->cmd_write - its->cmd_base;
678 ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);
679
680 /* This is incredibly unlikely to happen, unless the ITS locks up. */
681 if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
682 return 1;
683
684 return 0;
685}
686
687static struct its_cmd_block *its_allocate_entry(struct its_node *its)
688{
689 struct its_cmd_block *cmd;
690 u32 count = 1000000; /* 1s! */
691
692 while (its_queue_full(its)) {
693 count--;
694 if (!count) {
695 pr_err_ratelimited("ITS queue not draining\n");
696 return NULL;
697 }
698 cpu_relax();
699 udelay(1);
700 }
701
702 cmd = its->cmd_write++;
703
704 /* Handle queue wrapping */
705 if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
706 its->cmd_write = its->cmd_base;
707
Marc Zyngier34d677a2016-12-19 17:16:45 +0000708 /* Clear command */
709 cmd->raw_cmd[0] = 0;
710 cmd->raw_cmd[1] = 0;
711 cmd->raw_cmd[2] = 0;
712 cmd->raw_cmd[3] = 0;
713
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000714 return cmd;
715}
716
717static struct its_cmd_block *its_post_commands(struct its_node *its)
718{
719 u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);
720
721 writel_relaxed(wr, its->base + GITS_CWRITER);
722
723 return its->cmd_write;
724}
725
726static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
727{
728 /*
729 * Make sure the commands written to memory are observable by
730 * the ITS.
731 */
732 if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
Vladimir Murzin328191c2016-11-02 11:54:05 +0000733 gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000734 else
735 dsb(ishst);
736}
737
Marc Zyngiera19b4622017-08-04 17:45:50 +0100738static int its_wait_for_range_completion(struct its_node *its,
739 struct its_cmd_block *from,
740 struct its_cmd_block *to)
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000741{
742 u64 rd_idx, from_idx, to_idx;
743 u32 count = 1000000; /* 1s! */
744
745 from_idx = its_cmd_ptr_to_offset(its, from);
746 to_idx = its_cmd_ptr_to_offset(its, to);
747
748 while (1) {
749 rd_idx = readl_relaxed(its->base + GITS_CREADR);
Marc Zyngier9bdd8b12017-08-19 10:16:02 +0100750
751 /* Direct case */
752 if (from_idx < to_idx && rd_idx >= to_idx)
753 break;
754
755 /* Wrapped case */
756 if (from_idx >= to_idx && rd_idx >= to_idx && rd_idx < from_idx)
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000757 break;
758
759 count--;
760 if (!count) {
Marc Zyngiera19b4622017-08-04 17:45:50 +0100761 pr_err_ratelimited("ITS queue timeout (%llu %llu %llu)\n",
762 from_idx, to_idx, rd_idx);
763 return -1;
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000764 }
765 cpu_relax();
766 udelay(1);
767 }
Marc Zyngiera19b4622017-08-04 17:45:50 +0100768
769 return 0;
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000770}
771
Marc Zyngiere4f90942016-12-19 17:56:32 +0000772/* Warning, macro hell follows */
773#define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn) \
774void name(struct its_node *its, \
775 buildtype builder, \
776 struct its_cmd_desc *desc) \
777{ \
778 struct its_cmd_block *cmd, *sync_cmd, *next_cmd; \
779 synctype *sync_obj; \
780 unsigned long flags; \
781 \
782 raw_spin_lock_irqsave(&its->lock, flags); \
783 \
784 cmd = its_allocate_entry(its); \
785 if (!cmd) { /* We're soooooo screewed... */ \
786 raw_spin_unlock_irqrestore(&its->lock, flags); \
787 return; \
788 } \
Marc Zyngier67047f902017-07-28 21:16:58 +0100789 sync_obj = builder(its, cmd, desc); \
Marc Zyngiere4f90942016-12-19 17:56:32 +0000790 its_flush_cmd(its, cmd); \
791 \
792 if (sync_obj) { \
793 sync_cmd = its_allocate_entry(its); \
794 if (!sync_cmd) \
795 goto post; \
796 \
Marc Zyngier67047f902017-07-28 21:16:58 +0100797 buildfn(its, sync_cmd, sync_obj); \
Marc Zyngiere4f90942016-12-19 17:56:32 +0000798 its_flush_cmd(its, sync_cmd); \
799 } \
800 \
801post: \
802 next_cmd = its_post_commands(its); \
803 raw_spin_unlock_irqrestore(&its->lock, flags); \
804 \
Marc Zyngiera19b4622017-08-04 17:45:50 +0100805 if (its_wait_for_range_completion(its, cmd, next_cmd)) \
806 pr_err_ratelimited("ITS cmd %ps failed\n", builder); \
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000807}
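/*
 * Editorial note: the macro above is expanded twice below, generating
 * its_send_single_command() (physical commands, followed by a per-collection
 * SYNC) and its_send_single_vcommand() (GICv4 commands, followed by a VSYNC).
 */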
808
Marc Zyngier67047f902017-07-28 21:16:58 +0100809static void its_build_sync_cmd(struct its_node *its,
810 struct its_cmd_block *sync_cmd,
Marc Zyngiere4f90942016-12-19 17:56:32 +0000811 struct its_collection *sync_col)
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000812{
Marc Zyngiere4f90942016-12-19 17:56:32 +0000813 its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
814 its_encode_target(sync_cmd, sync_col->target_address);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000815
Marc Zyngiere4f90942016-12-19 17:56:32 +0000816 its_fixup_cmd(sync_cmd);
817}
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000818
Marc Zyngiere4f90942016-12-19 17:56:32 +0000819static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
820 struct its_collection, its_build_sync_cmd)
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000821
Marc Zyngier67047f902017-07-28 21:16:58 +0100822static void its_build_vsync_cmd(struct its_node *its,
823 struct its_cmd_block *sync_cmd,
Marc Zyngierd011e4e2016-12-20 09:44:41 +0000824 struct its_vpe *sync_vpe)
825{
826 its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
827 its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000828
Marc Zyngierd011e4e2016-12-20 09:44:41 +0000829 its_fixup_cmd(sync_cmd);
830}
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000831
Marc Zyngierd011e4e2016-12-20 09:44:41 +0000832static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
833 struct its_vpe, its_build_vsync_cmd)
834
Marc Zyngier8d85dce2016-12-19 18:02:13 +0000835static void its_send_int(struct its_device *dev, u32 event_id)
836{
837 struct its_cmd_desc desc;
838
839 desc.its_int_cmd.dev = dev;
840 desc.its_int_cmd.event_id = event_id;
841
842 its_send_single_command(dev->its, its_build_int_cmd, &desc);
843}
844
845static void its_send_clear(struct its_device *dev, u32 event_id)
846{
847 struct its_cmd_desc desc;
848
849 desc.its_clear_cmd.dev = dev;
850 desc.its_clear_cmd.event_id = event_id;
851
852 its_send_single_command(dev->its, its_build_clear_cmd, &desc);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000853}
854
855static void its_send_inv(struct its_device *dev, u32 event_id)
856{
857 struct its_cmd_desc desc;
858
859 desc.its_inv_cmd.dev = dev;
860 desc.its_inv_cmd.event_id = event_id;
861
862 its_send_single_command(dev->its, its_build_inv_cmd, &desc);
863}
864
865static void its_send_mapd(struct its_device *dev, int valid)
866{
867 struct its_cmd_desc desc;
868
869 desc.its_mapd_cmd.dev = dev;
870 desc.its_mapd_cmd.valid = !!valid;
871
872 its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
873}
874
875static void its_send_mapc(struct its_node *its, struct its_collection *col,
876 int valid)
877{
878 struct its_cmd_desc desc;
879
880 desc.its_mapc_cmd.col = col;
881 desc.its_mapc_cmd.valid = !!valid;
882
883 its_send_single_command(its, its_build_mapc_cmd, &desc);
884}
885
Marc Zyngier6a25ad32016-12-20 15:52:26 +0000886static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id)
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000887{
888 struct its_cmd_desc desc;
889
Marc Zyngier6a25ad32016-12-20 15:52:26 +0000890 desc.its_mapti_cmd.dev = dev;
891 desc.its_mapti_cmd.phys_id = irq_id;
892 desc.its_mapti_cmd.event_id = id;
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000893
Marc Zyngier6a25ad32016-12-20 15:52:26 +0000894 its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000895}
896
897static void its_send_movi(struct its_device *dev,
898 struct its_collection *col, u32 id)
899{
900 struct its_cmd_desc desc;
901
902 desc.its_movi_cmd.dev = dev;
903 desc.its_movi_cmd.col = col;
Marc Zyngier591e5be2015-07-17 10:46:42 +0100904 desc.its_movi_cmd.event_id = id;
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000905
906 its_send_single_command(dev->its, its_build_movi_cmd, &desc);
907}
908
909static void its_send_discard(struct its_device *dev, u32 id)
910{
911 struct its_cmd_desc desc;
912
913 desc.its_discard_cmd.dev = dev;
914 desc.its_discard_cmd.event_id = id;
915
916 its_send_single_command(dev->its, its_build_discard_cmd, &desc);
917}
918
919static void its_send_invall(struct its_node *its, struct its_collection *col)
920{
921 struct its_cmd_desc desc;
922
923 desc.its_invall_cmd.col = col;
924
925 its_send_single_command(its, its_build_invall_cmd, &desc);
926}
Marc Zyngierc48ed512014-11-24 14:35:12 +0000927
Marc Zyngierd011e4e2016-12-20 09:44:41 +0000928static void its_send_vmapti(struct its_device *dev, u32 id)
929{
930 struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
931 struct its_cmd_desc desc;
932
933 desc.its_vmapti_cmd.vpe = map->vpe;
934 desc.its_vmapti_cmd.dev = dev;
935 desc.its_vmapti_cmd.virt_id = map->vintid;
936 desc.its_vmapti_cmd.event_id = id;
937 desc.its_vmapti_cmd.db_enabled = map->db_enabled;
938
939 its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
940}
941
942static void its_send_vmovi(struct its_device *dev, u32 id)
943{
944 struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
945 struct its_cmd_desc desc;
946
947 desc.its_vmovi_cmd.vpe = map->vpe;
948 desc.its_vmovi_cmd.dev = dev;
949 desc.its_vmovi_cmd.event_id = id;
950 desc.its_vmovi_cmd.db_enabled = map->db_enabled;
951
952 its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
953}
954
Marc Zyngier75fd9512017-10-08 18:46:39 +0100955static void its_send_vmapp(struct its_node *its,
956 struct its_vpe *vpe, bool valid)
Marc Zyngiereb781922016-12-20 14:47:05 +0000957{
958 struct its_cmd_desc desc;
Marc Zyngiereb781922016-12-20 14:47:05 +0000959
960 desc.its_vmapp_cmd.vpe = vpe;
961 desc.its_vmapp_cmd.valid = valid;
Marc Zyngier75fd9512017-10-08 18:46:39 +0100962 desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];
Marc Zyngiereb781922016-12-20 14:47:05 +0000963
Marc Zyngier75fd9512017-10-08 18:46:39 +0100964 its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
Marc Zyngiereb781922016-12-20 14:47:05 +0000965}
966
Marc Zyngier3171a472016-12-20 15:17:28 +0000967static void its_send_vmovp(struct its_vpe *vpe)
968{
969 struct its_cmd_desc desc;
970 struct its_node *its;
971 unsigned long flags;
972 int col_id = vpe->col_idx;
973
974 desc.its_vmovp_cmd.vpe = vpe;
975 desc.its_vmovp_cmd.its_list = (u16)its_list_map;
976
977 if (!its_list_map) {
978 its = list_first_entry(&its_nodes, struct its_node, entry);
979 desc.its_vmovp_cmd.seq_num = 0;
980 desc.its_vmovp_cmd.col = &its->collections[col_id];
981 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
982 return;
983 }
984
985 /*
986 * Yet another marvel of the architecture. If using the
987 * its_list "feature", we need to make sure that all ITSs
988 * receive all VMOVP commands in the same order. The only way
989 * to guarantee this is to make vmovp a serialization point.
990 *
991 * Wall <-- Head.
992 */
993 raw_spin_lock_irqsave(&vmovp_lock, flags);
994
995 desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
996
997 /* Emit VMOVPs */
998 list_for_each_entry(its, &its_nodes, entry) {
999 if (!its->is_v4)
1000 continue;
1001
Marc Zyngier2247e1b2017-10-08 18:50:36 +01001002 if (!vpe->its_vm->vlpi_count[its->list_nr])
1003 continue;
1004
Marc Zyngier3171a472016-12-20 15:17:28 +00001005 desc.its_vmovp_cmd.col = &its->collections[col_id];
1006 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
1007 }
1008
1009 raw_spin_unlock_irqrestore(&vmovp_lock, flags);
1010}
1011
Marc Zyngier40619a22017-10-08 15:16:09 +01001012static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
Marc Zyngiereb781922016-12-20 14:47:05 +00001013{
1014 struct its_cmd_desc desc;
Marc Zyngiereb781922016-12-20 14:47:05 +00001015
1016 desc.its_vinvall_cmd.vpe = vpe;
Marc Zyngier40619a22017-10-08 15:16:09 +01001017 its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
Marc Zyngiereb781922016-12-20 14:47:05 +00001018}
1019
Marc Zyngierc48ed512014-11-24 14:35:12 +00001020/*
1021 * irqchip functions - assumes MSI, mostly.
1022 */
1023
1024static inline u32 its_get_event_id(struct irq_data *d)
1025{
1026 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
Marc Zyngier591e5be2015-07-17 10:46:42 +01001027 return d->hwirq - its_dev->event_map.lpi_base;
Marc Zyngierc48ed512014-11-24 14:35:12 +00001028}
1029
Marc Zyngier015ec032016-12-20 09:54:57 +00001030static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
Marc Zyngierc48ed512014-11-24 14:35:12 +00001031{
Marc Zyngier015ec032016-12-20 09:54:57 +00001032 irq_hw_number_t hwirq;
Marc Zyngiere1a2e202018-07-27 14:36:00 +01001033 void *va;
Marc Zyngieradcdb942016-12-19 19:18:13 +00001034 u8 *cfg;
Marc Zyngierc48ed512014-11-24 14:35:12 +00001035
Marc Zyngier015ec032016-12-20 09:54:57 +00001036 if (irqd_is_forwarded_to_vcpu(d)) {
1037 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1038 u32 event = its_get_event_id(d);
Marc Zyngierd4d7b4a2017-10-26 10:44:07 +01001039 struct its_vlpi_map *map;
Marc Zyngier015ec032016-12-20 09:54:57 +00001040
Marc Zyngiere1a2e202018-07-27 14:36:00 +01001041 va = page_address(its_dev->event_map.vm->vprop_page);
Marc Zyngierd4d7b4a2017-10-26 10:44:07 +01001042 map = &its_dev->event_map.vlpi_maps[event];
1043 hwirq = map->vintid;
1044
1045 /* Remember the updated property */
1046 map->properties &= ~clr;
1047 map->properties |= set | LPI_PROP_GROUP1;
Marc Zyngier015ec032016-12-20 09:54:57 +00001048 } else {
Marc Zyngiere1a2e202018-07-27 14:36:00 +01001049 va = gic_rdists->prop_table_va;
Marc Zyngier015ec032016-12-20 09:54:57 +00001050 hwirq = d->hwirq;
1051 }
Marc Zyngieradcdb942016-12-19 19:18:13 +00001052
Marc Zyngiere1a2e202018-07-27 14:36:00 +01001053 cfg = va + hwirq - 8192;
Marc Zyngieradcdb942016-12-19 19:18:13 +00001054 *cfg &= ~clr;
Marc Zyngier015ec032016-12-20 09:54:57 +00001055 *cfg |= set | LPI_PROP_GROUP1;
Marc Zyngierc48ed512014-11-24 14:35:12 +00001056
1057 /*
1058 * Make the above write visible to the redistributors.
1059 * And yes, we're flushing exactly: One. Single. Byte.
1060 * Humpf...
1061 */
1062 if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
Vladimir Murzin328191c2016-11-02 11:54:05 +00001063 gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
Marc Zyngierc48ed512014-11-24 14:35:12 +00001064 else
1065 dsb(ishst);
Marc Zyngier015ec032016-12-20 09:54:57 +00001066}
1067
1068static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
1069{
1070 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1071
1072 lpi_write_config(d, clr, set);
Marc Zyngieradcdb942016-12-19 19:18:13 +00001073 its_send_inv(its_dev, its_get_event_id(d));
Marc Zyngierc48ed512014-11-24 14:35:12 +00001074}
1075
Marc Zyngier015ec032016-12-20 09:54:57 +00001076static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
1077{
1078 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1079 u32 event = its_get_event_id(d);
1080
1081 if (its_dev->event_map.vlpi_maps[event].db_enabled == enable)
1082 return;
1083
1084 its_dev->event_map.vlpi_maps[event].db_enabled = enable;
1085
1086 /*
1087 * More fun with the architecture:
1088 *
1089 * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
1090 * value or to 1023, depending on the enable bit. But that
 1091 * would be issuing a mapping for an /existing/ DevID+EventID
1092 * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
1093 * to the /same/ vPE, using this opportunity to adjust the
1094 * doorbell. Mouahahahaha. We loves it, Precious.
1095 */
1096 its_send_vmovi(its_dev, event);
Marc Zyngierc48ed512014-11-24 14:35:12 +00001097}
1098
1099static void its_mask_irq(struct irq_data *d)
1100{
Marc Zyngier015ec032016-12-20 09:54:57 +00001101 if (irqd_is_forwarded_to_vcpu(d))
1102 its_vlpi_set_doorbell(d, false);
1103
Marc Zyngieradcdb942016-12-19 19:18:13 +00001104 lpi_update_config(d, LPI_PROP_ENABLED, 0);
Marc Zyngierc48ed512014-11-24 14:35:12 +00001105}
1106
1107static void its_unmask_irq(struct irq_data *d)
1108{
Marc Zyngier015ec032016-12-20 09:54:57 +00001109 if (irqd_is_forwarded_to_vcpu(d))
1110 its_vlpi_set_doorbell(d, true);
1111
Marc Zyngieradcdb942016-12-19 19:18:13 +00001112 lpi_update_config(d, 0, LPI_PROP_ENABLED);
Marc Zyngierc48ed512014-11-24 14:35:12 +00001113}
1114
Marc Zyngierc48ed512014-11-24 14:35:12 +00001115static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
1116 bool force)
1117{
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +02001118 unsigned int cpu;
1119 const struct cpumask *cpu_mask = cpu_online_mask;
Marc Zyngierc48ed512014-11-24 14:35:12 +00001120 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1121 struct its_collection *target_col;
1122 u32 id = its_get_event_id(d);
1123
Marc Zyngier015ec032016-12-20 09:54:57 +00001124 /* A forwarded interrupt should use irq_set_vcpu_affinity */
1125 if (irqd_is_forwarded_to_vcpu(d))
1126 return -EINVAL;
1127
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +02001128 /* lpi cannot be routed to a redistributor that is on a foreign node */
1129 if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
1130 if (its_dev->its->numa_node >= 0) {
1131 cpu_mask = cpumask_of_node(its_dev->its->numa_node);
1132 if (!cpumask_intersects(mask_val, cpu_mask))
1133 return -EINVAL;
1134 }
1135 }
1136
1137 cpu = cpumask_any_and(mask_val, cpu_mask);
1138
Marc Zyngierc48ed512014-11-24 14:35:12 +00001139 if (cpu >= nr_cpu_ids)
1140 return -EINVAL;
1141
MaJun8b8d94a2017-05-18 16:19:13 +08001142 /* don't set the affinity when the target cpu is the same as the current one */
1143 if (cpu != its_dev->event_map.col_map[id]) {
1144 target_col = &its_dev->its->collections[cpu];
1145 its_send_movi(its_dev, target_col, id);
1146 its_dev->event_map.col_map[id] = cpu;
Marc Zyngier0d224d32017-08-18 09:39:18 +01001147 irq_data_update_effective_affinity(d, cpumask_of(cpu));
MaJun8b8d94a2017-05-18 16:19:13 +08001148 }
Marc Zyngierc48ed512014-11-24 14:35:12 +00001149
1150 return IRQ_SET_MASK_OK_DONE;
1151}
1152
Ard Biesheuvel558b0162017-10-17 17:55:56 +01001153static u64 its_irq_get_msi_base(struct its_device *its_dev)
1154{
1155 struct its_node *its = its_dev->its;
1156
1157 return its->phys_base + GITS_TRANSLATER;
1158}
1159
Marc Zyngierb48ac832014-11-24 14:35:16 +00001160static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
1161{
1162 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1163 struct its_node *its;
1164 u64 addr;
1165
1166 its = its_dev->its;
Ard Biesheuvel558b0162017-10-17 17:55:56 +01001167 addr = its->get_msi_base(its_dev);
Marc Zyngierb48ac832014-11-24 14:35:16 +00001168
Vladimir Murzinb11283e2016-11-02 11:54:03 +00001169 msg->address_lo = lower_32_bits(addr);
1170 msg->address_hi = upper_32_bits(addr);
Marc Zyngierb48ac832014-11-24 14:35:16 +00001171 msg->data = its_get_event_id(d);
Robin Murphy44bb7e22016-09-12 17:13:59 +01001172
1173 iommu_dma_map_msi_msg(d->irq, msg);
Marc Zyngierb48ac832014-11-24 14:35:16 +00001174}
1175
Marc Zyngier8d85dce2016-12-19 18:02:13 +00001176static int its_irq_set_irqchip_state(struct irq_data *d,
1177 enum irqchip_irq_state which,
1178 bool state)
1179{
1180 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1181 u32 event = its_get_event_id(d);
1182
1183 if (which != IRQCHIP_STATE_PENDING)
1184 return -EINVAL;
1185
1186 if (state)
1187 its_send_int(its_dev, event);
1188 else
1189 its_send_clear(its_dev, event);
1190
1191 return 0;
1192}
1193
Marc Zyngier2247e1b2017-10-08 18:50:36 +01001194static void its_map_vm(struct its_node *its, struct its_vm *vm)
1195{
1196 unsigned long flags;
1197
1198 /* Not using the ITS list? Everything is always mapped. */
1199 if (!its_list_map)
1200 return;
1201
1202 raw_spin_lock_irqsave(&vmovp_lock, flags);
1203
1204 /*
1205 * If the VM wasn't mapped yet, iterate over the vpes and get
1206 * them mapped now.
1207 */
1208 vm->vlpi_count[its->list_nr]++;
1209
1210 if (vm->vlpi_count[its->list_nr] == 1) {
1211 int i;
1212
1213 for (i = 0; i < vm->nr_vpes; i++) {
1214 struct its_vpe *vpe = vm->vpes[i];
Marc Zyngier44c4c252017-10-19 10:11:34 +01001215 struct irq_data *d = irq_get_irq_data(vpe->irq);
Marc Zyngier2247e1b2017-10-08 18:50:36 +01001216
1217 /* Map the VPE to the first possible CPU */
1218 vpe->col_idx = cpumask_first(cpu_online_mask);
1219 its_send_vmapp(its, vpe, true);
1220 its_send_vinvall(its, vpe);
Marc Zyngier44c4c252017-10-19 10:11:34 +01001221 irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
Marc Zyngier2247e1b2017-10-08 18:50:36 +01001222 }
1223 }
1224
1225 raw_spin_unlock_irqrestore(&vmovp_lock, flags);
1226}
1227
1228static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
1229{
1230 unsigned long flags;
1231
1232 /* Not using the ITS list? Everything is always mapped. */
1233 if (!its_list_map)
1234 return;
1235
1236 raw_spin_lock_irqsave(&vmovp_lock, flags);
1237
1238 if (!--vm->vlpi_count[its->list_nr]) {
1239 int i;
1240
1241 for (i = 0; i < vm->nr_vpes; i++)
1242 its_send_vmapp(its, vm->vpes[i], false);
1243 }
1244
1245 raw_spin_unlock_irqrestore(&vmovp_lock, flags);
1246}
1247
Marc Zyngierd011e4e2016-12-20 09:44:41 +00001248static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
1249{
1250 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1251 u32 event = its_get_event_id(d);
1252 int ret = 0;
1253
1254 if (!info->map)
1255 return -EINVAL;
1256
1257 mutex_lock(&its_dev->event_map.vlpi_lock);
1258
1259 if (!its_dev->event_map.vm) {
1260 struct its_vlpi_map *maps;
1261
Kees Cook6396bb22018-06-12 14:03:40 -07001262 maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps),
Marc Zyngierd011e4e2016-12-20 09:44:41 +00001263 GFP_KERNEL);
1264 if (!maps) {
1265 ret = -ENOMEM;
1266 goto out;
1267 }
1268
1269 its_dev->event_map.vm = info->map->vm;
1270 its_dev->event_map.vlpi_maps = maps;
1271 } else if (its_dev->event_map.vm != info->map->vm) {
1272 ret = -EINVAL;
1273 goto out;
1274 }
1275
1276 /* Get our private copy of the mapping information */
1277 its_dev->event_map.vlpi_maps[event] = *info->map;
1278
1279 if (irqd_is_forwarded_to_vcpu(d)) {
1280 /* Already mapped, move it around */
1281 its_send_vmovi(its_dev, event);
1282 } else {
Marc Zyngier2247e1b2017-10-08 18:50:36 +01001283 /* Ensure all the VPEs are mapped on this ITS */
1284 its_map_vm(its_dev->its, info->map->vm);
1285
Marc Zyngierd4d7b4a2017-10-26 10:44:07 +01001286 /*
1287 * Flag the interrupt as forwarded so that we can
1288 * start poking the virtual property table.
1289 */
1290 irqd_set_forwarded_to_vcpu(d);
1291
1292 /* Write out the property to the prop table */
1293 lpi_write_config(d, 0xff, info->map->properties);
1294
Marc Zyngierd011e4e2016-12-20 09:44:41 +00001295 /* Drop the physical mapping */
1296 its_send_discard(its_dev, event);
1297
1298 /* and install the virtual one */
1299 its_send_vmapti(its_dev, event);
Marc Zyngierd011e4e2016-12-20 09:44:41 +00001300
1301 /* Increment the number of VLPIs */
1302 its_dev->event_map.nr_vlpis++;
1303 }
1304
1305out:
1306 mutex_unlock(&its_dev->event_map.vlpi_lock);
1307 return ret;
1308}
1309
1310static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
1311{
1312 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1313 u32 event = its_get_event_id(d);
1314 int ret = 0;
1315
1316 mutex_lock(&its_dev->event_map.vlpi_lock);
1317
1318 if (!its_dev->event_map.vm ||
1319 !its_dev->event_map.vlpi_maps[event].vm) {
1320 ret = -EINVAL;
1321 goto out;
1322 }
1323
1324 /* Copy our mapping information to the incoming request */
1325 *info->map = its_dev->event_map.vlpi_maps[event];
1326
1327out:
1328 mutex_unlock(&its_dev->event_map.vlpi_lock);
1329 return ret;
1330}
1331
1332static int its_vlpi_unmap(struct irq_data *d)
1333{
1334 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1335 u32 event = its_get_event_id(d);
1336 int ret = 0;
1337
1338 mutex_lock(&its_dev->event_map.vlpi_lock);
1339
1340 if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
1341 ret = -EINVAL;
1342 goto out;
1343 }
1344
1345 /* Drop the virtual mapping */
1346 its_send_discard(its_dev, event);
1347
1348 /* and restore the physical one */
1349 irqd_clr_forwarded_to_vcpu(d);
1350 its_send_mapti(its_dev, d->hwirq, event);
1351 lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO |
1352 LPI_PROP_ENABLED |
1353 LPI_PROP_GROUP1));
1354
Marc Zyngier2247e1b2017-10-08 18:50:36 +01001355 /* Potentially unmap the VM from this ITS */
1356 its_unmap_vm(its_dev->its, its_dev->event_map.vm);
1357
Marc Zyngierd011e4e2016-12-20 09:44:41 +00001358 /*
1359 * Drop the refcount and make the device available again if
1360 * this was the last VLPI.
1361 */
1362 if (!--its_dev->event_map.nr_vlpis) {
1363 its_dev->event_map.vm = NULL;
1364 kfree(its_dev->event_map.vlpi_maps);
1365 }
1366
1367out:
1368 mutex_unlock(&its_dev->event_map.vlpi_lock);
1369 return ret;
1370}
1371
Marc Zyngier015ec032016-12-20 09:54:57 +00001372static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
1373{
1374 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1375
1376 if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
1377 return -EINVAL;
1378
1379 if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI)
1380 lpi_update_config(d, 0xff, info->config);
1381 else
1382 lpi_write_config(d, 0xff, info->config);
1383 its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));
1384
1385 return 0;
1386}
1387
Marc Zyngierc808eea2016-12-20 09:31:20 +00001388static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
1389{
1390 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1391 struct its_cmd_info *info = vcpu_info;
1392
1393 /* Need a v4 ITS */
Marc Zyngierd011e4e2016-12-20 09:44:41 +00001394 if (!its_dev->its->is_v4)
Marc Zyngierc808eea2016-12-20 09:31:20 +00001395 return -EINVAL;
1396
Marc Zyngierd011e4e2016-12-20 09:44:41 +00001397 /* Unmap request? */
1398 if (!info)
1399 return its_vlpi_unmap(d);
1400
Marc Zyngierc808eea2016-12-20 09:31:20 +00001401 switch (info->cmd_type) {
1402 case MAP_VLPI:
Marc Zyngierd011e4e2016-12-20 09:44:41 +00001403 return its_vlpi_map(d, info);
Marc Zyngierc808eea2016-12-20 09:31:20 +00001404
1405 case GET_VLPI:
Marc Zyngierd011e4e2016-12-20 09:44:41 +00001406 return its_vlpi_get(d, info);
Marc Zyngierc808eea2016-12-20 09:31:20 +00001407
1408 case PROP_UPDATE_VLPI:
1409 case PROP_UPDATE_AND_INV_VLPI:
Marc Zyngier015ec032016-12-20 09:54:57 +00001410 return its_vlpi_prop_update(d, info);
Marc Zyngierc808eea2016-12-20 09:31:20 +00001411
1412 default:
1413 return -EINVAL;
1414 }
1415}
1416
Marc Zyngierc48ed512014-11-24 14:35:12 +00001417static struct irq_chip its_irq_chip = {
1418 .name = "ITS",
1419 .irq_mask = its_mask_irq,
1420 .irq_unmask = its_unmask_irq,
Ashok Kumar004fa082016-02-11 05:38:53 -08001421 .irq_eoi = irq_chip_eoi_parent,
Marc Zyngierc48ed512014-11-24 14:35:12 +00001422 .irq_set_affinity = its_set_affinity,
Marc Zyngierb48ac832014-11-24 14:35:16 +00001423 .irq_compose_msi_msg = its_irq_compose_msi_msg,
Marc Zyngier8d85dce2016-12-19 18:02:13 +00001424 .irq_set_irqchip_state = its_irq_set_irqchip_state,
Marc Zyngierc808eea2016-12-20 09:31:20 +00001425 .irq_set_vcpu_affinity = its_irq_set_vcpu_affinity,
Marc Zyngierb48ac832014-11-24 14:35:16 +00001426};
1427
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001428
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001429/*
1430 * How we allocate LPIs:
1431 *
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001432 * lpi_range_list contains ranges of LPIs that are available to
1433 * allocate from. To allocate LPIs, just pick the first range that
1434 * fits the required allocation, and reduce it by the required
1435 * amount. Once empty, remove the range from the list.
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001436 *
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001437 * To free a range of LPIs, add a free range to the list, sort it and
1438 * merge the result if the new range happens to be adjacent to an
1439 * already free block.
1440 *
 1441 * The consequence of the above is that allocation cost is low, but
 1442 * freeing is expensive. We assume that freeing rarely occurs.
1443 */
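/*
 * Editorial example (illustrative): allocating 32 LPIs from a free range
 * [base 8192, span 8192] returns base 8192 and shrinks the range to
 * [8224, 8160]; freeing them later adds [8192, 32] back to the list, and
 * merge_lpi_ranges() coalesces it with an adjacent free block if one exists.
 */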
Jia He4cb205c2018-08-28 12:53:26 +08001444#define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001445
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001446static DEFINE_MUTEX(lpi_range_lock);
1447static LIST_HEAD(lpi_range_list);
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001448
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001449struct lpi_range {
1450 struct list_head entry;
1451 u32 base_id;
1452 u32 span;
1453};
1454
1455static struct lpi_range *mk_lpi_range(u32 base, u32 span)
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001456{
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001457 struct lpi_range *range;
1458
1459 range = kzalloc(sizeof(*range), GFP_KERNEL);
1460 if (range) {
1461 INIT_LIST_HEAD(&range->entry);
1462 range->base_id = base;
1463 range->span = span;
1464 }
1465
1466 return range;
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001467}
1468
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001469static int lpi_range_cmp(void *priv, struct list_head *a, struct list_head *b)
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001470{
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001471 struct lpi_range *ra, *rb;
1472
1473 ra = container_of(a, struct lpi_range, entry);
1474 rb = container_of(b, struct lpi_range, entry);
1475
1476 return rb->base_id - ra->base_id;
1477}
1478
1479static void merge_lpi_ranges(void)
1480{
1481 struct lpi_range *range, *tmp;
1482
1483 list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
1484 if (!list_is_last(&range->entry, &lpi_range_list) &&
1485 (tmp->base_id == (range->base_id + range->span))) {
1486 tmp->base_id = range->base_id;
1487 tmp->span += range->span;
1488 list_del(&range->entry);
1489 kfree(range);
1490 }
1491 }
1492}
1493
1494static int alloc_lpi_range(u32 nr_lpis, u32 *base)
1495{
1496 struct lpi_range *range, *tmp;
1497 int err = -ENOSPC;
1498
1499 mutex_lock(&lpi_range_lock);
1500
1501 list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
1502 if (range->span >= nr_lpis) {
1503 *base = range->base_id;
1504 range->base_id += nr_lpis;
1505 range->span -= nr_lpis;
1506
1507 if (range->span == 0) {
1508 list_del(&range->entry);
1509 kfree(range);
1510 }
1511
1512 err = 0;
1513 break;
1514 }
1515 }
1516
1517 mutex_unlock(&lpi_range_lock);
1518
1519 pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis);
1520 return err;
1521}
1522
1523static int free_lpi_range(u32 base, u32 nr_lpis)
1524{
1525 struct lpi_range *new;
1526 int err = 0;
1527
1528 mutex_lock(&lpi_range_lock);
1529
1530 new = mk_lpi_range(base, nr_lpis);
1531 if (!new) {
1532 err = -ENOMEM;
1533 goto out;
1534 }
1535
1536 list_add(&new->entry, &lpi_range_list);
1537 list_sort(NULL, &lpi_range_list, lpi_range_cmp);
1538 merge_lpi_ranges();
1539out:
1540 mutex_unlock(&lpi_range_lock);
1541 return err;
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001542}
1543
Tomasz Nowicki04a0e4d2016-01-19 14:11:18 +01001544static int __init its_lpi_init(u32 id_bits)
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001545{
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001546 u32 lpis = (1UL << id_bits) - 8192;
Marc Zyngier12b29052018-05-31 09:01:59 +01001547 u32 numlpis;
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001548 int err;
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001549
Marc Zyngier12b29052018-05-31 09:01:59 +01001550 numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer);
1551
1552 if (numlpis > 2 && !WARN_ON(numlpis > lpis)) {
1553 lpis = numlpis;
1554 pr_info("ITS: Using hypervisor restricted LPI range [%u]\n",
1555 lpis);
1556 }
1557
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001558 /*
1559 * Initializing the allocator is just the same as freeing the
1560 * full range of LPIs.
1561 */
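	/* e.g. id_bits = 16 seeds a single free range covering LPIs 8192-65535 */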
1562 err = free_lpi_range(8192, lpis);
1563 pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis);
1564 return err;
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001565}
1566
Marc Zyngier38dd7c42018-05-27 17:03:03 +01001567static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids)
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001568{
1569 unsigned long *bitmap = NULL;
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001570 int err = 0;
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001571
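	/* No free range large enough? Halve the request and try again. */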
1572 do {
Marc Zyngier38dd7c42018-05-27 17:03:03 +01001573 err = alloc_lpi_range(nr_irqs, base);
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001574 if (!err)
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001575 break;
1576
Marc Zyngier38dd7c42018-05-27 17:03:03 +01001577 nr_irqs /= 2;
1578 } while (nr_irqs > 0);
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001579
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001580 if (err)
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001581 goto out;
1582
Marc Zyngier38dd7c42018-05-27 17:03:03 +01001583 bitmap = kcalloc(BITS_TO_LONGS(nr_irqs), sizeof (long), GFP_ATOMIC);
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001584 if (!bitmap)
1585 goto out;
1586
Marc Zyngier38dd7c42018-05-27 17:03:03 +01001587 *nr_ids = nr_irqs;
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001588
1589out:
Marc Zyngierc8415b92015-10-02 16:44:05 +01001590 if (!bitmap)
1591 *base = *nr_ids = 0;
1592
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001593 return bitmap;
1594}
1595
Marc Zyngier38dd7c42018-05-27 17:03:03 +01001596static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids)
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001597{
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001598 WARN_ON(free_lpi_range(base, nr_ids));
Marc Zyngiercf2be8b2016-12-19 18:49:59 +00001599 kfree(bitmap);
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001600}
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001601
Marc Zyngier053be482018-07-27 15:02:27 +01001602static void gic_reset_prop_table(void *va)
1603{
1604 /* Priority 0xa0, Group-1, disabled */
1605 memset(va, LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, LPI_PROPBASE_SZ);
1606
1607 /* Make sure the GIC will observe the written configuration */
1608 gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ);
1609}
1610
Marc Zyngier0e5ccf92016-12-19 18:15:05 +00001611static struct page *its_allocate_prop_table(gfp_t gfp_flags)
1612{
1613 struct page *prop_page;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001614
Marc Zyngier0e5ccf92016-12-19 18:15:05 +00001615 prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
1616 if (!prop_page)
1617 return NULL;
1618
Marc Zyngier053be482018-07-27 15:02:27 +01001619 gic_reset_prop_table(page_address(prop_page));
Marc Zyngier0e5ccf92016-12-19 18:15:05 +00001620
1621 return prop_page;
1622}
1623
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00001624static void its_free_prop_table(struct page *prop_page)
1625{
1626 free_pages((unsigned long)page_address(prop_page),
1627 get_order(LPI_PROPBASE_SZ));
1628}
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001629
Marc Zyngier11e37d32018-07-27 13:38:54 +01001630static int __init its_setup_lpi_prop_table(void)
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001631{
Marc Zyngierc440a9d2018-07-27 15:40:13 +01001632 if (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) {
1633 u64 val;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001634
Marc Zyngierc440a9d2018-07-27 15:40:13 +01001635 val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
1636 lpi_id_bits = (val & GICR_PROPBASER_IDBITS_MASK) + 1;
1637
1638 gic_rdists->prop_table_pa = val & GENMASK_ULL(51, 12);
1639 gic_rdists->prop_table_va = memremap(gic_rdists->prop_table_pa,
1640 LPI_PROPBASE_SZ,
1641 MEMREMAP_WB);
1642 gic_reset_prop_table(gic_rdists->prop_table_va);
1643 } else {
1644 struct page *page;
1645
1646 lpi_id_bits = min_t(u32,
1647 GICD_TYPER_ID_BITS(gic_rdists->gicd_typer),
1648 ITS_MAX_LPI_NRBITS);
1649 page = its_allocate_prop_table(GFP_NOWAIT);
1650 if (!page) {
1651 pr_err("Failed to allocate PROPBASE\n");
1652 return -ENOMEM;
1653 }
1654
1655 gic_rdists->prop_table_pa = page_to_phys(page);
1656 gic_rdists->prop_table_va = page_address(page);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001657 }
1658
Marc Zyngiere1a2e202018-07-27 14:36:00 +01001659 pr_info("GICv3: using LPI property table @%pa\n",
1660 &gic_rdists->prop_table_pa);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001661
Shanker Donthineni6c31e122017-06-22 18:19:14 -05001662 return its_lpi_init(lpi_id_bits);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001663}
1664
1665static const char *its_base_type_string[] = {
1666 [GITS_BASER_TYPE_DEVICE] = "Devices",
1667 [GITS_BASER_TYPE_VCPU] = "Virtual CPUs",
Marc Zyngier4f46de92016-12-20 15:50:14 +00001668 [GITS_BASER_TYPE_RESERVED3] = "Reserved (3)",
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001669 [GITS_BASER_TYPE_COLLECTION] = "Interrupt Collections",
1670 [GITS_BASER_TYPE_RESERVED5] = "Reserved (5)",
1671 [GITS_BASER_TYPE_RESERVED6] = "Reserved (6)",
1672 [GITS_BASER_TYPE_RESERVED7] = "Reserved (7)",
1673};
1674
Shanker Donthineni2d81d422016-06-06 18:17:28 -05001675static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
1676{
1677 u32 idx = baser - its->tables;
1678
Vladimir Murzin0968a612016-11-02 11:54:06 +00001679 return gits_read_baser(its->base + GITS_BASER + (idx << 3));
Shanker Donthineni2d81d422016-06-06 18:17:28 -05001680}
1681
1682static void its_write_baser(struct its_node *its, struct its_baser *baser,
1683 u64 val)
1684{
1685 u32 idx = baser - its->tables;
1686
Vladimir Murzin0968a612016-11-02 11:54:06 +00001687 gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
Shanker Donthineni2d81d422016-06-06 18:17:28 -05001688 baser->val = its_read_baser(its, baser);
1689}
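/*
 * Note that baser->val is refreshed from the hardware after each write,
 * so callers can compare it against the value they tried to program and
 * detect fields (shareability, page size, ...) that did not stick.
 */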
1690
Shanker Donthineni93473592016-06-06 18:17:30 -05001691static int its_setup_baser(struct its_node *its, struct its_baser *baser,
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001692 u64 cache, u64 shr, u32 psz, u32 order,
1693 bool indirect)
Shanker Donthineni93473592016-06-06 18:17:30 -05001694{
1695 u64 val = its_read_baser(its, baser);
1696 u64 esz = GITS_BASER_ENTRY_SIZE(val);
1697 u64 type = GITS_BASER_TYPE(val);
Shanker Donthineni30ae9612017-10-09 11:46:55 -05001698 u64 baser_phys, tmp;
Shanker Donthineni93473592016-06-06 18:17:30 -05001699 u32 alloc_pages;
1700 void *base;
Shanker Donthineni93473592016-06-06 18:17:30 -05001701
1702retry_alloc_baser:
1703 alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
1704 if (alloc_pages > GITS_BASER_PAGES_MAX) {
1705 pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
1706 &its->phys_base, its_base_type_string[type],
1707 alloc_pages, GITS_BASER_PAGES_MAX);
1708 alloc_pages = GITS_BASER_PAGES_MAX;
1709 order = get_order(GITS_BASER_PAGES_MAX * psz);
1710 }
1711
1712 base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
1713 if (!base)
1714 return -ENOMEM;
1715
Shanker Donthineni30ae9612017-10-09 11:46:55 -05001716 baser_phys = virt_to_phys(base);
1717
1718 /* Check if the physical address of the memory is above 48bits */
1719 if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) {
1720
1721 /* 52bit PA is supported only when PageSize=64K */
1722 if (psz != SZ_64K) {
1723 pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
1724 free_pages((unsigned long)base, order);
1725 return -ENXIO;
1726 }
1727
1728 /* Convert 52bit PA to 48bit field */
1729 baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys);
1730 }
1731
Shanker Donthineni93473592016-06-06 18:17:30 -05001732retry_baser:
Shanker Donthineni30ae9612017-10-09 11:46:55 -05001733 val = (baser_phys |
Shanker Donthineni93473592016-06-06 18:17:30 -05001734 (type << GITS_BASER_TYPE_SHIFT) |
1735 ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
1736 ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) |
1737 cache |
1738 shr |
1739 GITS_BASER_VALID);
1740
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001741 val |= indirect ? GITS_BASER_INDIRECT : 0x0;
1742
Shanker Donthineni93473592016-06-06 18:17:30 -05001743 switch (psz) {
1744 case SZ_4K:
1745 val |= GITS_BASER_PAGE_SIZE_4K;
1746 break;
1747 case SZ_16K:
1748 val |= GITS_BASER_PAGE_SIZE_16K;
1749 break;
1750 case SZ_64K:
1751 val |= GITS_BASER_PAGE_SIZE_64K;
1752 break;
1753 }
1754
1755 its_write_baser(its, baser, val);
1756 tmp = baser->val;
1757
1758 if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
1759 /*
1760 * Shareability didn't stick. Just use
1761 * whatever the read reported, which is likely
1762 * to be the only thing this redistributor
1763 * supports. If that's zero, make it
1764 * non-cacheable as well.
1765 */
1766 shr = tmp & GITS_BASER_SHAREABILITY_MASK;
1767 if (!shr) {
1768 cache = GITS_BASER_nC;
Vladimir Murzin328191c2016-11-02 11:54:05 +00001769 gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
Shanker Donthineni93473592016-06-06 18:17:30 -05001770 }
1771 goto retry_baser;
1772 }
1773
1774 if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
1775 /*
1776 * Page size didn't stick. Let's try a smaller
1777 * size and retry. If we reach 4K, then
1778 * something is horribly wrong...
1779 */
1780 free_pages((unsigned long)base, order);
1781 baser->base = NULL;
1782
1783 switch (psz) {
1784 case SZ_16K:
1785 psz = SZ_4K;
1786 goto retry_alloc_baser;
1787 case SZ_64K:
1788 psz = SZ_16K;
1789 goto retry_alloc_baser;
1790 }
1791 }
1792
1793 if (val != tmp) {
Vladimir Murzinb11283e2016-11-02 11:54:03 +00001794 pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
Shanker Donthineni93473592016-06-06 18:17:30 -05001795 &its->phys_base, its_base_type_string[type],
Vladimir Murzinb11283e2016-11-02 11:54:03 +00001796 val, tmp);
Shanker Donthineni93473592016-06-06 18:17:30 -05001797 free_pages((unsigned long)base, order);
1798 return -ENXIO;
1799 }
1800
1801 baser->order = order;
1802 baser->base = base;
1803 baser->psz = psz;
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001804 tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;
Shanker Donthineni93473592016-06-06 18:17:30 -05001805
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001806 pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
Vladimir Murzind524eaa2016-11-02 11:54:04 +00001807 &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
Shanker Donthineni93473592016-06-06 18:17:30 -05001808 its_base_type_string[type],
1809 (unsigned long)virt_to_phys(base),
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001810 indirect ? "indirect" : "flat", (int)esz,
Shanker Donthineni93473592016-06-06 18:17:30 -05001811 psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
1812
1813 return 0;
1814}
1815
Marc Zyngier4cacac52016-12-19 18:18:34 +00001816static bool its_parse_indirect_baser(struct its_node *its,
1817 struct its_baser *baser,
Shanker Donthineni32bd44d2017-10-07 15:43:48 -05001818 u32 psz, u32 *order, u32 ids)
Shanker Donthineni4b75c452016-06-06 18:17:29 -05001819{
Marc Zyngier4cacac52016-12-19 18:18:34 +00001820 u64 tmp = its_read_baser(its, baser);
1821 u64 type = GITS_BASER_TYPE(tmp);
1822 u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
Shanker Donthineni2fd632a2017-01-25 21:51:41 -06001823 u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
Shanker Donthineni4b75c452016-06-06 18:17:29 -05001824 u32 new_order = *order;
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001825 bool indirect = false;
1826
1827 /* No need to enable Indirection if memory requirement < (psz*2)bytes */
1828 if ((esz << ids) > (psz * 2)) {
1829 /*
1830 * Find out whether hw supports a single or two-level table by
1831 * reading bit at offset '62' after writing '1' to it.
1832 */
1833 its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
1834 indirect = !!(baser->val & GITS_BASER_INDIRECT);
1835
1836 if (indirect) {
1837 /*
1838 * The size of the lvl2 table is equal to ITS page size
1839 * which is 'psz'. For computing the lvl1 table size,
1840 * subtract from 'ids' (as reported by the ITS hardware) the
1841 * number of ID bits covered by a single lvl2 table; the lvl1
1842 * table then needs (1 << ids) entries of lvl1 entry size.
1843 */
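			/*
			 * For illustration, assuming psz = 64K, esz = 8 and
			 * ids = 20: a flat table would need 8 << 20 = 8MB,
			 * whereas one 64K lvl2 page resolves 64K / 8 = 8192
			 * IDs (13 bits), leaving a lvl1 table of only
			 * 2^(20 - 13) * GITS_LVL1_ENTRY_SIZE = 1KB.
			 */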
Vladimir Murzind524eaa2016-11-02 11:54:04 +00001844 ids -= ilog2(psz / (int)esz);
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001845 esz = GITS_LVL1_ENTRY_SIZE;
1846 }
1847 }
Shanker Donthineni4b75c452016-06-06 18:17:29 -05001848
1849 /*
1850 * Allocate as many entries as required to fit the
1851 * range of device IDs that the ITS can grok... The ID
1852 * space being incredibly sparse, this results in a
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001853 * massive waste of memory if the two-level device table
1854 * feature is not supported by hardware.
Shanker Donthineni4b75c452016-06-06 18:17:29 -05001855 */
1856 new_order = max_t(u32, get_order(esz << ids), new_order);
1857 if (new_order >= MAX_ORDER) {
1858 new_order = MAX_ORDER - 1;
Vladimir Murzind524eaa2016-11-02 11:54:04 +00001859 ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
Marc Zyngier4cacac52016-12-19 18:18:34 +00001860 pr_warn("ITS@%pa: %s Table too large, reduce ids %u->%u\n",
1861 &its->phys_base, its_base_type_string[type],
1862 its->device_ids, ids);
Shanker Donthineni4b75c452016-06-06 18:17:29 -05001863 }
1864
1865 *order = new_order;
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001866
1867 return indirect;
Shanker Donthineni4b75c452016-06-06 18:17:29 -05001868}
1869
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001870static void its_free_tables(struct its_node *its)
1871{
1872 int i;
1873
1874 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
Shanker Donthineni1a485f42016-02-01 20:19:44 -06001875 if (its->tables[i].base) {
1876 free_pages((unsigned long)its->tables[i].base,
1877 its->tables[i].order);
1878 its->tables[i].base = NULL;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001879 }
1880 }
1881}
1882
Shanker Donthineni0e0b0f62016-06-06 18:17:31 -05001883static int its_alloc_tables(struct its_node *its)
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001884{
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001885 u64 shr = GITS_BASER_InnerShareable;
Shanker Donthineni2fd632a2017-01-25 21:51:41 -06001886 u64 cache = GITS_BASER_RaWaWb;
Shanker Donthineni93473592016-06-06 18:17:30 -05001887 u32 psz = SZ_64K;
1888 int err, i;
Robert Richter94100972015-09-21 22:58:38 +02001889
Ard Biesheuvelfa150012017-10-17 17:55:54 +01001890 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)
1891 /* erratum 24313: ignore memory access type */
1892 cache = GITS_BASER_nCnB;
Shanker Donthineni466b7d12016-03-09 22:10:49 -06001893
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001894 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
Shanker Donthineni2d81d422016-06-06 18:17:28 -05001895 struct its_baser *baser = its->tables + i;
1896 u64 val = its_read_baser(its, baser);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001897 u64 type = GITS_BASER_TYPE(val);
Shanker Donthineni93473592016-06-06 18:17:30 -05001898 u32 order = get_order(psz);
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001899 bool indirect = false;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001900
Marc Zyngier4cacac52016-12-19 18:18:34 +00001901 switch (type) {
1902 case GITS_BASER_TYPE_NONE:
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001903 continue;
1904
Marc Zyngier4cacac52016-12-19 18:18:34 +00001905 case GITS_BASER_TYPE_DEVICE:
Shanker Donthineni32bd44d2017-10-07 15:43:48 -05001906 indirect = its_parse_indirect_baser(its, baser,
1907 psz, &order,
1908 its->device_ids);
			break;
Marc Zyngier4cacac52016-12-19 18:18:34 +00001909 case GITS_BASER_TYPE_VCPU:
1910 indirect = its_parse_indirect_baser(its, baser,
Shanker Donthineni32bd44d2017-10-07 15:43:48 -05001911 psz, &order,
1912 ITS_MAX_VPEID_BITS);
Marc Zyngier4cacac52016-12-19 18:18:34 +00001913 break;
1914 }
Marc Zyngierf54b97e2015-03-06 16:37:41 +00001915
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001916 err = its_setup_baser(its, baser, cache, shr, psz, order, indirect);
Shanker Donthineni93473592016-06-06 18:17:30 -05001917 if (err < 0) {
1918 its_free_tables(its);
1919 return err;
Robert Richter30f21362015-09-21 22:58:34 +02001920 }
1921
Shanker Donthineni93473592016-06-06 18:17:30 -05001922 /* Update settings which will be used for next BASERn */
1923 psz = baser->psz;
1924 cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
1925 shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001926 }
1927
1928 return 0;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001929}
1930
1931static int its_alloc_collections(struct its_node *its)
1932{
Marc Zyngier83559b42018-06-22 10:52:52 +01001933 int i;
1934
Kees Cook6396bb22018-06-12 14:03:40 -07001935 its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001936 GFP_KERNEL);
1937 if (!its->collections)
1938 return -ENOMEM;
1939
Marc Zyngier83559b42018-06-22 10:52:52 +01001940 for (i = 0; i < nr_cpu_ids; i++)
1941 its->collections[i].target_address = ~0ULL;
1942
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001943 return 0;
1944}
1945
Marc Zyngier7c297a22016-12-19 18:34:38 +00001946static struct page *its_allocate_pending_table(gfp_t gfp_flags)
1947{
1948 struct page *pend_page;
Marc Zyngieradaab502018-07-17 18:06:39 +01001949
Marc Zyngier7c297a22016-12-19 18:34:38 +00001950 pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
Marc Zyngieradaab502018-07-17 18:06:39 +01001951 get_order(LPI_PENDBASE_SZ));
Marc Zyngier7c297a22016-12-19 18:34:38 +00001952 if (!pend_page)
1953 return NULL;
1954
1955 /* Make sure the GIC will observe the zero-ed page */
1956 gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);
1957
1958 return pend_page;
1959}
1960
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00001961static void its_free_pending_table(struct page *pt)
1962{
Marc Zyngieradaab502018-07-17 18:06:39 +01001963 free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ));
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00001964}
1965
Marc Zyngierc440a9d2018-07-27 15:40:13 +01001966static bool enabled_lpis_allowed(void)
1967{
1968 return false;
1969}
1970
Marc Zyngier11e37d32018-07-27 13:38:54 +01001971static int __init allocate_lpi_tables(void)
1972{
Marc Zyngierc440a9d2018-07-27 15:40:13 +01001973 u64 val;
Marc Zyngier11e37d32018-07-27 13:38:54 +01001974 int err, cpu;
1975
Marc Zyngierc440a9d2018-07-27 15:40:13 +01001976 /*
1977 * If LPIs are enabled while we run this from the boot CPU,
1978 * flag the RD tables as pre-allocated if the stars do align.
1979 */
1980 val = readl_relaxed(gic_data_rdist_rd_base() + GICR_CTLR);
1981 if ((val & GICR_CTLR_ENABLE_LPIS) && enabled_lpis_allowed()) {
1982 gic_rdists->flags |= (RDIST_FLAGS_RD_TABLES_PREALLOCATED |
1983 RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING);
1984 pr_info("GICv3: Using preallocated redistributor tables\n");
1985 }
1986
Marc Zyngier11e37d32018-07-27 13:38:54 +01001987 err = its_setup_lpi_prop_table();
1988 if (err)
1989 return err;
1990
1991 /*
1992 * We allocate all the pending tables anyway, as we may have a
1993 * mix of RDs that have had LPIs enabled, and some that
1994 * don't. We'll free the unused ones as each CPU comes online.
1995 */
1996 for_each_possible_cpu(cpu) {
1997 struct page *pend_page;
1998
1999 pend_page = its_allocate_pending_table(GFP_NOWAIT);
2000 if (!pend_page) {
2001 pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu);
2002 return -ENOMEM;
2003 }
2004
2005 gic_data_rdist_cpu(cpu)->pend_page = pend_page;
2006 }
2007
2008 return 0;
2009}
2010
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002011static void its_cpu_init_lpis(void)
2012{
2013 void __iomem *rbase = gic_data_rdist_rd_base();
2014 struct page *pend_page;
Marc Zyngier11e37d32018-07-27 13:38:54 +01002015 phys_addr_t paddr;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002016 u64 val, tmp;
2017
Marc Zyngier11e37d32018-07-27 13:38:54 +01002018 if (gic_data_rdist()->lpi_enabled)
2019 return;
2020
Marc Zyngierc440a9d2018-07-27 15:40:13 +01002021 val = readl_relaxed(rbase + GICR_CTLR);
2022 if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) &&
2023 (val & GICR_CTLR_ENABLE_LPIS)) {
2024 paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER);
2025 paddr &= GENMASK_ULL(51, 16);
2026
2027 its_free_pending_table(gic_data_rdist()->pend_page);
2028 gic_data_rdist()->pend_page = NULL;
2029
2030 goto out;
2031 }
2032
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002033 pend_page = gic_data_rdist()->pend_page;
Marc Zyngier11e37d32018-07-27 13:38:54 +01002034 paddr = page_to_phys(pend_page);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002035
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002036 /* set PROPBASE */
Marc Zyngiere1a2e202018-07-27 14:36:00 +01002037 val = (gic_rdists->prop_table_pa |
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002038 GICR_PROPBASER_InnerShareable |
Shanker Donthineni2fd632a2017-01-25 21:51:41 -06002039 GICR_PROPBASER_RaWaWb |
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002040 ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
2041
Vladimir Murzin0968a612016-11-02 11:54:06 +00002042 gicr_write_propbaser(val, rbase + GICR_PROPBASER);
2043 tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002044
2045 if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
Marc Zyngier241a3862015-03-27 14:15:05 +00002046 if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
2047 /*
2048 * The HW reports non-shareable, we must
2049 * remove the cacheability attributes as
2050 * well.
2051 */
2052 val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
2053 GICR_PROPBASER_CACHEABILITY_MASK);
2054 val |= GICR_PROPBASER_nC;
Vladimir Murzin0968a612016-11-02 11:54:06 +00002055 gicr_write_propbaser(val, rbase + GICR_PROPBASER);
Marc Zyngier241a3862015-03-27 14:15:05 +00002056 }
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002057 pr_info_once("GIC: using cache flushing for LPI property table\n");
2058 gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
2059 }
2060
2061 /* set PENDBASE */
2062 val = (page_to_phys(pend_page) |
Marc Zyngier4ad3e362015-03-27 14:15:04 +00002063 GICR_PENDBASER_InnerShareable |
Shanker Donthineni2fd632a2017-01-25 21:51:41 -06002064 GICR_PENDBASER_RaWaWb);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002065
Vladimir Murzin0968a612016-11-02 11:54:06 +00002066 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
2067 tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
Marc Zyngier241a3862015-03-27 14:15:05 +00002068
2069 if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
2070 /*
2071 * The HW reports non-shareable, we must remove the
2072 * cacheability attributes as well.
2073 */
2074 val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
2075 GICR_PENDBASER_CACHEABILITY_MASK);
2076 val |= GICR_PENDBASER_nC;
Vladimir Murzin0968a612016-11-02 11:54:06 +00002077 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
Marc Zyngier241a3862015-03-27 14:15:05 +00002078 }
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002079
2080 /* Enable LPIs */
2081 val = readl_relaxed(rbase + GICR_CTLR);
2082 val |= GICR_CTLR_ENABLE_LPIS;
2083 writel_relaxed(val, rbase + GICR_CTLR);
2084
2085 /* Make sure the GIC has seen the above */
2086 dsb(sy);
Marc Zyngierc440a9d2018-07-27 15:40:13 +01002087out:
Marc Zyngier11e37d32018-07-27 13:38:54 +01002088 gic_data_rdist()->lpi_enabled = true;
Marc Zyngierc440a9d2018-07-27 15:40:13 +01002089 pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n",
Marc Zyngier11e37d32018-07-27 13:38:54 +01002090 smp_processor_id(),
Marc Zyngierc440a9d2018-07-27 15:40:13 +01002091 gic_data_rdist()->pend_page ? "allocated" : "reserved",
Marc Zyngier11e37d32018-07-27 13:38:54 +01002092 &paddr);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002093}
2094
Derek Basehore920181c2018-02-28 21:48:20 -08002095static void its_cpu_init_collection(struct its_node *its)
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002096{
Derek Basehore920181c2018-02-28 21:48:20 -08002097 int cpu = smp_processor_id();
2098 u64 target;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002099
Derek Basehore920181c2018-02-28 21:48:20 -08002100 /* avoid cross-node collections and their mapping */
2101 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
2102 struct device_node *cpu_node;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002103
Derek Basehore920181c2018-02-28 21:48:20 -08002104 cpu_node = of_get_cpu_node(cpu, NULL);
2105 if (its->numa_node != NUMA_NO_NODE &&
2106 its->numa_node != of_node_to_nid(cpu_node))
2107 return;
2108 }
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002109
Derek Basehore920181c2018-02-28 21:48:20 -08002110 /*
2111 * We now have to bind each collection to its target
2112 * redistributor.
2113 */
2114 if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002115 /*
Derek Basehore920181c2018-02-28 21:48:20 -08002116 * This ITS wants the physical address of the
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002117 * redistributor.
2118 */
Derek Basehore920181c2018-02-28 21:48:20 -08002119 target = gic_data_rdist()->phys_base;
2120 } else {
2121 /* This ITS wants a linear CPU number. */
2122 target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
2123 target = GICR_TYPER_CPU_NUMBER(target) << 16;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002124 }
2125
Derek Basehore920181c2018-02-28 21:48:20 -08002126 /* Perform collection mapping */
2127 its->collections[cpu].target_address = target;
2128 its->collections[cpu].col_id = cpu;
2129
2130 its_send_mapc(its, &its->collections[cpu], 1);
2131 its_send_invall(its, &its->collections[cpu]);
2132}
2133
2134static void its_cpu_init_collections(void)
2135{
2136 struct its_node *its;
2137
Sebastian Andrzej Siewiora8db7452018-07-18 17:42:04 +02002138 raw_spin_lock(&its_lock);
Derek Basehore920181c2018-02-28 21:48:20 -08002139
2140 list_for_each_entry(its, &its_nodes, entry)
2141 its_cpu_init_collection(its);
2142
Sebastian Andrzej Siewiora8db7452018-07-18 17:42:04 +02002143 raw_spin_unlock(&its_lock);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002144}
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002145
2146static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
2147{
2148 struct its_device *its_dev = NULL, *tmp;
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002149 unsigned long flags;
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002150
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002151 raw_spin_lock_irqsave(&its->lock, flags);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002152
2153 list_for_each_entry(tmp, &its->its_device_list, entry) {
2154 if (tmp->device_id == dev_id) {
2155 its_dev = tmp;
2156 break;
2157 }
2158 }
2159
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002160 raw_spin_unlock_irqrestore(&its->lock, flags);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002161
2162 return its_dev;
2163}
2164
Shanker Donthineni466b7d12016-03-09 22:10:49 -06002165static struct its_baser *its_get_baser(struct its_node *its, u32 type)
2166{
2167 int i;
2168
2169 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
2170 if (GITS_BASER_TYPE(its->tables[i].val) == type)
2171 return &its->tables[i];
2172 }
2173
2174 return NULL;
2175}
2176
Marc Zyngier70cc81e2016-12-19 18:53:02 +00002177static bool its_alloc_table_entry(struct its_baser *baser, u32 id)
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002178{
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002179 struct page *page;
2180 u32 esz, idx;
2181 __le64 *table;
2182
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002183 /* Don't allow device id that exceeds single, flat table limit */
2184 esz = GITS_BASER_ENTRY_SIZE(baser->val);
2185 if (!(baser->val & GITS_BASER_INDIRECT))
Marc Zyngier70cc81e2016-12-19 18:53:02 +00002186 return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002187
2188 /* Compute 1st level table index & check if that exceeds table limit */
Marc Zyngier70cc81e2016-12-19 18:53:02 +00002189 idx = id >> ilog2(baser->psz / esz);
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002190 if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
2191 return false;
2192
2193 table = baser->base;
2194
2195 /* Allocate memory for 2nd level table */
2196 if (!table[idx]) {
2197 page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(baser->psz));
2198 if (!page)
2199 return false;
2200
2201 /* Flush Lvl2 table to PoC if hw doesn't support coherency */
2202 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
Vladimir Murzin328191c2016-11-02 11:54:05 +00002203 gic_flush_dcache_to_poc(page_address(page), baser->psz);
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002204
2205 table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
2206
2207 /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
2208 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
Vladimir Murzin328191c2016-11-02 11:54:05 +00002209 gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002210
2211 /* Ensure updated table contents are visible to ITS hardware */
2212 dsb(sy);
2213 }
2214
2215 return true;
2216}
2217
Marc Zyngier70cc81e2016-12-19 18:53:02 +00002218static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
2219{
2220 struct its_baser *baser;
2221
2222 baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
2223
2224 /* Don't allow device id that exceeds ITS hardware limit */
2225 if (!baser)
2226 return (ilog2(dev_id) < its->device_ids);
2227
2228 return its_alloc_table_entry(baser, dev_id);
2229}
2230
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00002231static bool its_alloc_vpe_table(u32 vpe_id)
2232{
2233 struct its_node *its;
2234
2235 /*
2236 * Make sure the L2 tables are allocated on *all* v4 ITSs. We
2237 * could try and only do it on ITSs corresponding to devices
2238 * that have interrupts targeted at this VPE, but the
2239 * complexity becomes crazy (and you have tons of memory
2240 * anyway, right?).
2241 */
2242 list_for_each_entry(its, &its_nodes, entry) {
2243 struct its_baser *baser;
2244
2245 if (!its->is_v4)
2246 continue;
2247
2248 baser = its_get_baser(its, GITS_BASER_TYPE_VCPU);
2249 if (!baser)
2250 return false;
2251
2252 if (!its_alloc_table_entry(baser, vpe_id))
2253 return false;
2254 }
2255
2256 return true;
2257}
2258
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002259static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002260 int nvecs, bool alloc_lpis)
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002261{
2262 struct its_device *dev;
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002263 unsigned long *lpi_map = NULL;
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002264 unsigned long flags;
Marc Zyngier591e5be2015-07-17 10:46:42 +01002265 u16 *col_map = NULL;
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002266 void *itt;
2267 int lpi_base;
2268 int nr_lpis;
Marc Zyngierc8481262014-12-12 10:51:24 +00002269 int nr_ites;
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002270 int sz;
2271
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002272 if (!its_alloc_device_table(its, dev_id))
Shanker Donthineni466b7d12016-03-09 22:10:49 -06002273 return NULL;
2274
Marc Zyngier147c8f32018-05-27 16:39:55 +01002275 if (WARN_ON(!is_power_of_2(nvecs)))
2276 nvecs = roundup_pow_of_two(nvecs);
2277
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002278 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
Marc Zyngierc8481262014-12-12 10:51:24 +00002279 /*
Marc Zyngier147c8f32018-05-27 16:39:55 +01002280 * Even if the device wants a single LPI, the ITT must be
2281 * sized as a power of two (and you need at least one bit...).
Marc Zyngierc8481262014-12-12 10:51:24 +00002282 */
Marc Zyngier147c8f32018-05-27 16:39:55 +01002283 nr_ites = max(2, nvecs);
Marc Zyngierc8481262014-12-12 10:51:24 +00002284 sz = nr_ites * its->ite_size;
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002285 sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
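	/*
	 * For example, nvecs = 1 still yields nr_ites = 2; with a
	 * (hypothetical) 8-byte ITE that is 16 bytes of ITT, padded so an
	 * ITS_ITT_ALIGN-aligned block fits inside the allocation.
	 */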
Yun Wu6c834122015-03-06 16:37:46 +00002286 itt = kzalloc(sz, GFP_KERNEL);
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002287 if (alloc_lpis) {
Marc Zyngier38dd7c42018-05-27 17:03:03 +01002288 lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002289 if (lpi_map)
Kees Cook6396bb22018-06-12 14:03:40 -07002290 col_map = kcalloc(nr_lpis, sizeof(*col_map),
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002291 GFP_KERNEL);
2292 } else {
Kees Cook6396bb22018-06-12 14:03:40 -07002293 col_map = kcalloc(nr_ites, sizeof(*col_map), GFP_KERNEL);
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002294 nr_lpis = 0;
2295 lpi_base = 0;
2296 }
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002297
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002298 if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002299 kfree(dev);
2300 kfree(itt);
2301 kfree(lpi_map);
Marc Zyngier591e5be2015-07-17 10:46:42 +01002302 kfree(col_map);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002303 return NULL;
2304 }
2305
Vladimir Murzin328191c2016-11-02 11:54:05 +00002306 gic_flush_dcache_to_poc(itt, sz);
Marc Zyngier5a9a8912015-09-13 12:14:32 +01002307
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002308 dev->its = its;
2309 dev->itt = itt;
Marc Zyngierc8481262014-12-12 10:51:24 +00002310 dev->nr_ites = nr_ites;
Marc Zyngier591e5be2015-07-17 10:46:42 +01002311 dev->event_map.lpi_map = lpi_map;
2312 dev->event_map.col_map = col_map;
2313 dev->event_map.lpi_base = lpi_base;
2314 dev->event_map.nr_lpis = nr_lpis;
Marc Zyngierd011e4e2016-12-20 09:44:41 +00002315 mutex_init(&dev->event_map.vlpi_lock);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002316 dev->device_id = dev_id;
2317 INIT_LIST_HEAD(&dev->entry);
2318
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002319 raw_spin_lock_irqsave(&its->lock, flags);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002320 list_add(&dev->entry, &its->its_device_list);
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002321 raw_spin_unlock_irqrestore(&its->lock, flags);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002322
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002323 /* Map device to its ITT */
2324 its_send_mapd(dev, 1);
2325
2326 return dev;
2327}
2328
2329static void its_free_device(struct its_device *its_dev)
2330{
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002331 unsigned long flags;
2332
2333 raw_spin_lock_irqsave(&its_dev->its->lock, flags);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002334 list_del(&its_dev->entry);
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002335 raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002336 kfree(its_dev->itt);
2337 kfree(its_dev);
2338}
Marc Zyngierb48ac832014-11-24 14:35:16 +00002339
2340static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
2341{
2342 int idx;
2343
Marc Zyngier591e5be2015-07-17 10:46:42 +01002344 idx = find_first_zero_bit(dev->event_map.lpi_map,
2345 dev->event_map.nr_lpis);
2346 if (idx == dev->event_map.nr_lpis)
Marc Zyngierb48ac832014-11-24 14:35:16 +00002347 return -ENOSPC;
2348
Marc Zyngier591e5be2015-07-17 10:46:42 +01002349 *hwirq = dev->event_map.lpi_base + idx;
2350 set_bit(idx, dev->event_map.lpi_map);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002351
Marc Zyngierb48ac832014-11-24 14:35:16 +00002352 return 0;
2353}
2354
Marc Zyngier54456db2015-07-28 14:46:21 +01002355static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
2356 int nvec, msi_alloc_info_t *info)
Marc Zyngiere8137f42015-03-06 16:37:42 +00002357{
Marc Zyngierb48ac832014-11-24 14:35:16 +00002358 struct its_node *its;
Marc Zyngierb48ac832014-11-24 14:35:16 +00002359 struct its_device *its_dev;
Marc Zyngier54456db2015-07-28 14:46:21 +01002360 struct msi_domain_info *msi_info;
2361 u32 dev_id;
Marc Zyngierb48ac832014-11-24 14:35:16 +00002362
Marc Zyngier54456db2015-07-28 14:46:21 +01002363 /*
2364 * We ignore "dev" entirely, and rely on the dev_id that has
2365 * been passed via the scratchpad. This limits this domain's
2366 * usefulness to upper layers that definitely know that they
2367 * are built on top of the ITS.
2368 */
2369 dev_id = info->scratchpad[0].ul;
2370
2371 msi_info = msi_get_domain_info(domain);
2372 its = msi_info->data;
2373
Marc Zyngier20b3d542016-12-20 15:23:22 +00002374 if (!gic_rdists->has_direct_lpi &&
2375 vpe_proxy.dev &&
2376 vpe_proxy.dev->its == its &&
2377 dev_id == vpe_proxy.dev->device_id) {
2378 /* Bad luck. Get yourself a better implementation */
2379 WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n",
2380 dev_id);
2381 return -EINVAL;
2382 }
2383
Marc Zyngierf1304202015-07-28 14:46:18 +01002384 its_dev = its_find_device(its, dev_id);
Marc Zyngiere8137f42015-03-06 16:37:42 +00002385 if (its_dev) {
2386 /*
2387 * We already have seen this ID, probably through
2388 * another alias (PCI bridge of some sort). No need to
2389 * create the device.
2390 */
Marc Zyngierf1304202015-07-28 14:46:18 +01002391 pr_debug("Reusing ITT for devID %x\n", dev_id);
Marc Zyngiere8137f42015-03-06 16:37:42 +00002392 goto out;
2393 }
Marc Zyngierb48ac832014-11-24 14:35:16 +00002394
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002395 its_dev = its_create_device(its, dev_id, nvec, true);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002396 if (!its_dev)
2397 return -ENOMEM;
2398
Marc Zyngierf1304202015-07-28 14:46:18 +01002399 pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
Marc Zyngiere8137f42015-03-06 16:37:42 +00002400out:
Marc Zyngierb48ac832014-11-24 14:35:16 +00002401 info->scratchpad[0].ptr = its_dev;
Marc Zyngierb48ac832014-11-24 14:35:16 +00002402 return 0;
2403}
2404
Marc Zyngier54456db2015-07-28 14:46:21 +01002405static struct msi_domain_ops its_msi_domain_ops = {
2406 .msi_prepare = its_msi_prepare,
2407};
2408
Marc Zyngierb48ac832014-11-24 14:35:16 +00002409static int its_irq_gic_domain_alloc(struct irq_domain *domain,
2410 unsigned int virq,
2411 irq_hw_number_t hwirq)
2412{
Marc Zyngierf833f572015-10-13 12:51:33 +01002413 struct irq_fwspec fwspec;
Marc Zyngierb48ac832014-11-24 14:35:16 +00002414
Marc Zyngierf833f572015-10-13 12:51:33 +01002415 if (irq_domain_get_of_node(domain->parent)) {
2416 fwspec.fwnode = domain->parent->fwnode;
2417 fwspec.param_count = 3;
2418 fwspec.param[0] = GIC_IRQ_TYPE_LPI;
2419 fwspec.param[1] = hwirq;
2420 fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
Tomasz Nowicki3f010cf2016-09-12 20:32:25 +02002421 } else if (is_fwnode_irqchip(domain->parent->fwnode)) {
2422 fwspec.fwnode = domain->parent->fwnode;
2423 fwspec.param_count = 2;
2424 fwspec.param[0] = hwirq;
2425 fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
Marc Zyngierf833f572015-10-13 12:51:33 +01002426 } else {
2427 return -EINVAL;
2428 }
Marc Zyngierb48ac832014-11-24 14:35:16 +00002429
Marc Zyngierf833f572015-10-13 12:51:33 +01002430 return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002431}
2432
2433static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
2434 unsigned int nr_irqs, void *args)
2435{
2436 msi_alloc_info_t *info = args;
2437 struct its_device *its_dev = info->scratchpad[0].ptr;
2438 irq_hw_number_t hwirq;
2439 int err;
2440 int i;
2441
2442 for (i = 0; i < nr_irqs; i++) {
2443 err = its_alloc_device_irq(its_dev, &hwirq);
2444 if (err)
2445 return err;
2446
2447 err = its_irq_gic_domain_alloc(domain, virq + i, hwirq);
2448 if (err)
2449 return err;
2450
2451 irq_domain_set_hwirq_and_chip(domain, virq + i,
2452 hwirq, &its_irq_chip, its_dev);
Marc Zyngier0d224d32017-08-18 09:39:18 +01002453 irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i)));
Marc Zyngierf1304202015-07-28 14:46:18 +01002454 pr_debug("ID:%d pID:%d vID:%d\n",
2455 (int)(hwirq - its_dev->event_map.lpi_base),
2456 (int) hwirq, virq + i);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002457 }
2458
2459 return 0;
2460}
2461
Thomas Gleixner72491642017-09-13 23:29:10 +02002462static int its_irq_domain_activate(struct irq_domain *domain,
Thomas Gleixner702cb0a2017-12-29 16:59:06 +01002463 struct irq_data *d, bool reserve)
Marc Zyngieraca268d2014-12-12 10:51:23 +00002464{
2465 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2466 u32 event = its_get_event_id(d);
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +02002467 const struct cpumask *cpu_mask = cpu_online_mask;
Marc Zyngier0d224d32017-08-18 09:39:18 +01002468 int cpu;
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +02002469
2470 /* get the cpu_mask of local node */
2471 if (its_dev->its->numa_node >= 0)
2472 cpu_mask = cpumask_of_node(its_dev->its->numa_node);
Marc Zyngieraca268d2014-12-12 10:51:23 +00002473
Marc Zyngier591e5be2015-07-17 10:46:42 +01002474 /* Bind the LPI to the first possible CPU */
Yang Yingliangc1797b12018-06-22 10:52:51 +01002475 cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
2476 if (cpu >= nr_cpu_ids) {
2477 if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144)
2478 return -EINVAL;
2479
2480 cpu = cpumask_first(cpu_online_mask);
2481 }
2482
Marc Zyngier0d224d32017-08-18 09:39:18 +01002483 its_dev->event_map.col_map[event] = cpu;
2484 irq_data_update_effective_affinity(d, cpumask_of(cpu));
Marc Zyngier591e5be2015-07-17 10:46:42 +01002485
Marc Zyngieraca268d2014-12-12 10:51:23 +00002486 /* Map the GIC IRQ and event to the device */
Marc Zyngier6a25ad32016-12-20 15:52:26 +00002487 its_send_mapti(its_dev, d->hwirq, event);
Thomas Gleixner72491642017-09-13 23:29:10 +02002488 return 0;
Marc Zyngieraca268d2014-12-12 10:51:23 +00002489}
2490
2491static void its_irq_domain_deactivate(struct irq_domain *domain,
2492 struct irq_data *d)
2493{
2494 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2495 u32 event = its_get_event_id(d);
2496
2497 /* Stop the delivery of interrupts */
2498 its_send_discard(its_dev, event);
2499}
2500
Marc Zyngierb48ac832014-11-24 14:35:16 +00002501static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
2502 unsigned int nr_irqs)
2503{
2504 struct irq_data *d = irq_domain_get_irq_data(domain, virq);
2505 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2506 int i;
2507
2508 for (i = 0; i < nr_irqs; i++) {
2509 struct irq_data *data = irq_domain_get_irq_data(domain,
2510 virq + i);
Marc Zyngieraca268d2014-12-12 10:51:23 +00002511 u32 event = its_get_event_id(data);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002512
2513 /* Mark interrupt index as unused */
Marc Zyngier591e5be2015-07-17 10:46:42 +01002514 clear_bit(event, its_dev->event_map.lpi_map);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002515
2516 /* Nuke the entry in the domain */
Marc Zyngier2da39942014-12-12 10:51:22 +00002517 irq_domain_reset_irq_data(data);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002518 }
2519
2520 /* If all interrupts have been freed, start mopping the floor */
Marc Zyngier591e5be2015-07-17 10:46:42 +01002521 if (bitmap_empty(its_dev->event_map.lpi_map,
2522 its_dev->event_map.nr_lpis)) {
Marc Zyngier38dd7c42018-05-27 17:03:03 +01002523 its_lpi_free(its_dev->event_map.lpi_map,
2524 its_dev->event_map.lpi_base,
2525 its_dev->event_map.nr_lpis);
Marc Zyngiercf2be8b2016-12-19 18:49:59 +00002526 kfree(its_dev->event_map.col_map);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002527
2528 /* Unmap device/itt */
2529 its_send_mapd(its_dev, 0);
2530 its_free_device(its_dev);
2531 }
2532
2533 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
2534}
2535
2536static const struct irq_domain_ops its_domain_ops = {
2537 .alloc = its_irq_domain_alloc,
2538 .free = its_irq_domain_free,
Marc Zyngieraca268d2014-12-12 10:51:23 +00002539 .activate = its_irq_domain_activate,
2540 .deactivate = its_irq_domain_deactivate,
Marc Zyngierb48ac832014-11-24 14:35:16 +00002541};
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00002542
Marc Zyngier20b3d542016-12-20 15:23:22 +00002543/*
2544 * This is insane.
2545 *
2546 * If a GICv4 doesn't implement Direct LPIs (which is extremely
2547 * likely), the only way to perform an invalidate is to use a fake
2548 * device to issue an INV command, implying that the LPI has first
2549 * been mapped to some event on that device. Since this is not exactly
2550 * cheap, we try to keep that mapping around as long as possible, and
2551 * only issue an UNMAP if we're short on available slots.
2552 *
2553 * Broken by design(tm).
2554 */
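/*
 * Rough flow, for reference: to act on a VPE's doorbell LPI without
 * Direct LPI support, the doorbell is first mapped (MAPTI) as an event
 * on the vpe_proxy device; INV/INT/CLEAR commands are then issued
 * against that event. Proxy slots are recycled round-robin through
 * vpe_proxy.next_victim when the device runs out of free ITEs.
 */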
2555static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
2556{
2557 /* Already unmapped? */
2558 if (vpe->vpe_proxy_event == -1)
2559 return;
2560
2561 its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event);
2562 vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL;
2563
2564 /*
2565 * We don't track empty slots at all, so let's move the
2566 * next_victim pointer if we can quickly reuse that slot
2567 * instead of nuking an existing entry. Not clear that this is
2568 * always a win though, and this might just generate a ripple
2569 * effect... Let's just hope VPEs don't migrate too often.
2570 */
2571 if (vpe_proxy.vpes[vpe_proxy.next_victim])
2572 vpe_proxy.next_victim = vpe->vpe_proxy_event;
2573
2574 vpe->vpe_proxy_event = -1;
2575}
2576
2577static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
2578{
2579 if (!gic_rdists->has_direct_lpi) {
2580 unsigned long flags;
2581
2582 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2583 its_vpe_db_proxy_unmap_locked(vpe);
2584 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2585 }
2586}
2587
2588static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
2589{
2590 /* Already mapped? */
2591 if (vpe->vpe_proxy_event != -1)
2592 return;
2593
2594 /* This slot was already allocated. Kick the other VPE out. */
2595 if (vpe_proxy.vpes[vpe_proxy.next_victim])
2596 its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]);
2597
2598 /* Map the new VPE instead */
2599 vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
2600 vpe->vpe_proxy_event = vpe_proxy.next_victim;
2601 vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites;
2602
2603 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx;
2604 its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event);
2605}
2606
Marc Zyngier958b90d2017-08-18 16:14:17 +01002607static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
2608{
2609 unsigned long flags;
2610 struct its_collection *target_col;
2611
2612 if (gic_rdists->has_direct_lpi) {
2613 void __iomem *rdbase;
2614
2615 rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base;
2616 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
2617 while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
2618 cpu_relax();
2619
2620 return;
2621 }
2622
2623 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2624
2625 its_vpe_db_proxy_map_locked(vpe);
2626
2627 target_col = &vpe_proxy.dev->its->collections[to];
2628 its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event);
2629 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to;
2630
2631 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2632}
2633
Marc Zyngier3171a472016-12-20 15:17:28 +00002634static int its_vpe_set_affinity(struct irq_data *d,
2635 const struct cpumask *mask_val,
2636 bool force)
2637{
2638 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2639 int cpu = cpumask_first(mask_val);
2640
2641 /*
2642 * Changing affinity is mega expensive, so let's be as lazy as
Marc Zyngier20b3d542016-12-20 15:23:22 +00002643 * we can and only do it if we really have to. Also, if mapped
Marc Zyngier958b90d2017-08-18 16:14:17 +01002644 * into the proxy device, we need to move the doorbell
2645 * interrupt to its new location.
Marc Zyngier3171a472016-12-20 15:17:28 +00002646 */
2647 if (vpe->col_idx != cpu) {
Marc Zyngier958b90d2017-08-18 16:14:17 +01002648 int from = vpe->col_idx;
2649
Marc Zyngier3171a472016-12-20 15:17:28 +00002650 vpe->col_idx = cpu;
2651 its_send_vmovp(vpe);
Marc Zyngier958b90d2017-08-18 16:14:17 +01002652 its_vpe_db_proxy_move(vpe, from, cpu);
Marc Zyngier3171a472016-12-20 15:17:28 +00002653 }
2654
Marc Zyngier44c4c252017-10-19 10:11:34 +01002655 irq_data_update_effective_affinity(d, cpumask_of(cpu));
2656
Marc Zyngier3171a472016-12-20 15:17:28 +00002657 return IRQ_SET_MASK_OK_DONE;
2658}
2659
Marc Zyngiere643d802016-12-20 15:09:31 +00002660static void its_vpe_schedule(struct its_vpe *vpe)
2661{
Robin Murphy50c33092018-02-16 16:57:56 +00002662 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
Marc Zyngiere643d802016-12-20 15:09:31 +00002663 u64 val;
2664
2665 /* Schedule the VPE */
2666 val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
2667 GENMASK_ULL(51, 12);
2668 val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
2669 val |= GICR_VPROPBASER_RaWb;
2670 val |= GICR_VPROPBASER_InnerShareable;
2671 gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2672
2673 val = virt_to_phys(page_address(vpe->vpt_page)) &
2674 GENMASK_ULL(51, 16);
2675 val |= GICR_VPENDBASER_RaWaWb;
2676 val |= GICR_VPENDBASER_NonShareable;
2677 /*
2678 * There is no good way of finding out if the pending table is
2679 * empty as we can race against the doorbell interrupt very
2680 * easily. So in the end, vpe->pending_last is only an
2681 * indication that the vcpu has something pending, not one
2682 * that the pending table is empty. A good implementation
2683 * would be able to read its coarse map pretty quickly anyway,
2684 * making this a tolerable issue.
2685 */
2686 val |= GICR_VPENDBASER_PendingLast;
2687 val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
2688 val |= GICR_VPENDBASER_Valid;
2689 gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
2690}
2691
2692static void its_vpe_deschedule(struct its_vpe *vpe)
2693{
Robin Murphy50c33092018-02-16 16:57:56 +00002694 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
Marc Zyngiere643d802016-12-20 15:09:31 +00002695 u32 count = 1000000; /* 1s! */
2696 bool clean;
2697 u64 val;
2698
2699 /* We're being scheduled out */
2700 val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
2701 val &= ~GICR_VPENDBASER_Valid;
2702 gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
2703
2704 do {
2705 val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
2706 clean = !(val & GICR_VPENDBASER_Dirty);
2707 if (!clean) {
2708 count--;
2709 cpu_relax();
2710 udelay(1);
2711 }
2712 } while (!clean && count);
2713
2714 if (unlikely(!clean && !count)) {
2715 pr_err_ratelimited("ITS virtual pending table not cleaning\n");
2716 vpe->idai = false;
2717 vpe->pending_last = true;
2718 } else {
2719 vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
2720 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
2721 }
2722}
2723
Marc Zyngier40619a22017-10-08 15:16:09 +01002724static void its_vpe_invall(struct its_vpe *vpe)
2725{
2726 struct its_node *its;
2727
2728 list_for_each_entry(its, &its_nodes, entry) {
2729 if (!its->is_v4)
2730 continue;
2731
Marc Zyngier2247e1b2017-10-08 18:50:36 +01002732 if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr])
2733 continue;
2734
Marc Zyngier3c1ccee2017-10-09 13:17:43 +01002735 /*
2736 * Sending a VINVALL to a single ITS is enough, as all
2737 * we need is to reach the redistributors.
2738 */
Marc Zyngier40619a22017-10-08 15:16:09 +01002739 its_send_vinvall(its, vpe);
Marc Zyngier3c1ccee2017-10-09 13:17:43 +01002740 return;
Marc Zyngier40619a22017-10-08 15:16:09 +01002741 }
2742}
2743
Marc Zyngiere643d802016-12-20 15:09:31 +00002744static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
2745{
2746 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2747 struct its_cmd_info *info = vcpu_info;
2748
2749 switch (info->cmd_type) {
2750 case SCHEDULE_VPE:
2751 its_vpe_schedule(vpe);
2752 return 0;
2753
2754 case DESCHEDULE_VPE:
2755 its_vpe_deschedule(vpe);
2756 return 0;
2757
Marc Zyngier5e2f7642016-12-20 15:10:50 +00002758 case INVALL_VPE:
Marc Zyngier40619a22017-10-08 15:16:09 +01002759 its_vpe_invall(vpe);
Marc Zyngier5e2f7642016-12-20 15:10:50 +00002760 return 0;
2761
Marc Zyngiere643d802016-12-20 15:09:31 +00002762 default:
2763 return -EINVAL;
2764 }
2765}
2766
Marc Zyngier20b3d542016-12-20 15:23:22 +00002767static void its_vpe_send_cmd(struct its_vpe *vpe,
2768 void (*cmd)(struct its_device *, u32))
2769{
2770 unsigned long flags;
2771
2772 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2773
2774 its_vpe_db_proxy_map_locked(vpe);
2775 cmd(vpe_proxy.dev, vpe->vpe_proxy_event);
2776
2777 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2778}
2779
Marc Zyngierf6a91da2016-12-20 15:20:38 +00002780static void its_vpe_send_inv(struct irq_data *d)
2781{
2782 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
Marc Zyngierf6a91da2016-12-20 15:20:38 +00002783
Marc Zyngier20b3d542016-12-20 15:23:22 +00002784 if (gic_rdists->has_direct_lpi) {
2785 void __iomem *rdbase;
2786
2787 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
2788 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_INVLPIR);
2789 while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
2790 cpu_relax();
2791 } else {
2792 its_vpe_send_cmd(vpe, its_send_inv);
2793 }
Marc Zyngierf6a91da2016-12-20 15:20:38 +00002794}
2795
2796static void its_vpe_mask_irq(struct irq_data *d)
2797{
2798 /*
2799 * We need to mask the LPI, which is described by the parent
2800 * irq_data. Instead of calling into the parent (which won't
2801 * exactly do the right thing), let's simply use the
2802 * parent_data pointer. Yes, I'm naughty.
2803 */
2804 lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
2805 its_vpe_send_inv(d);
2806}
2807
2808static void its_vpe_unmask_irq(struct irq_data *d)
2809{
2810 /* Same hack as above... */
2811 lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
2812 its_vpe_send_inv(d);
2813}
2814
static int its_vpe_set_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which,
				     bool state)
{
	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);

	if (which != IRQCHIP_STATE_PENDING)
		return -EINVAL;

	if (gic_rdists->has_direct_lpi) {
		void __iomem *rdbase;

		rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
		if (state) {
			gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
		} else {
			gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
			while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
				cpu_relax();
		}
	} else {
		if (state)
			its_vpe_send_cmd(vpe, its_send_int);
		else
			its_vpe_send_cmd(vpe, its_send_clear);
	}

	return 0;
}

static struct irq_chip its_vpe_irq_chip = {
	.name			= "GICv4-vpe",
	.irq_mask		= its_vpe_mask_irq,
	.irq_unmask		= its_vpe_unmask_irq,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= its_vpe_set_affinity,
	.irq_set_irqchip_state	= its_vpe_set_irqchip_state,
	.irq_set_vcpu_affinity	= its_vpe_set_vcpu_affinity,
};

static int its_vpe_id_alloc(void)
{
	return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
}

static void its_vpe_id_free(u16 id)
{
	ida_simple_remove(&its_vpeid_ida, id);
}

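/*
 * Per-vPE setup: reserve a vPE ID, allocate the virtual pending table
 * (VPT) the redistributor scans while the vPE is resident, and make sure
 * the ITS vPE table can hold an entry for this ID.
 */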
static int its_vpe_init(struct its_vpe *vpe)
{
	struct page *vpt_page;
	int vpe_id;

	/* Allocate vpe_id */
	vpe_id = its_vpe_id_alloc();
	if (vpe_id < 0)
		return vpe_id;

	/* Allocate VPT */
	vpt_page = its_allocate_pending_table(GFP_KERNEL);
	if (!vpt_page) {
		its_vpe_id_free(vpe_id);
		return -ENOMEM;
	}

	if (!its_alloc_vpe_table(vpe_id)) {
		its_vpe_id_free(vpe_id);
		its_free_pending_table(vpt_page);
		return -ENOMEM;
	}

	vpe->vpe_id = vpe_id;
	vpe->vpt_page = vpt_page;
	vpe->vpe_proxy_event = -1;

	return 0;
}

static void its_vpe_teardown(struct its_vpe *vpe)
{
	its_vpe_db_proxy_unmap(vpe);
	its_vpe_id_free(vpe->vpe_id);
	its_free_pending_table(vpe->vpt_page);
}

static void its_vpe_irq_domain_free(struct irq_domain *domain,
				    unsigned int virq,
				    unsigned int nr_irqs)
{
	struct its_vm *vm = domain->host_data;
	int i;

	irq_domain_free_irqs_parent(domain, virq, nr_irqs);

	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *data = irq_domain_get_irq_data(domain,
								virq + i);
		struct its_vpe *vpe = irq_data_get_irq_chip_data(data);

		BUG_ON(vm != vpe->its_vm);

		clear_bit(data->hwirq, vm->db_bitmap);
		its_vpe_teardown(vpe);
		irq_domain_reset_irq_data(data);
	}

	if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) {
		its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
		its_free_prop_table(vm->vprop_page);
	}
}

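/*
 * Allocate the vPEs of a VM in one go: grab a contiguous block of
 * doorbell LPIs (one per vPE), a shared property table page for the
 * guest's VLPI configuration, then initialise and map each vPE into the
 * irq domain.
 */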
static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				    unsigned int nr_irqs, void *args)
{
	struct its_vm *vm = args;
	unsigned long *bitmap;
	struct page *vprop_page;
	int base, nr_ids, i, err = 0;

	BUG_ON(!vm);

	bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids);
	if (!bitmap)
		return -ENOMEM;

	if (nr_ids < nr_irqs) {
		its_lpi_free(bitmap, base, nr_ids);
		return -ENOMEM;
	}

	vprop_page = its_allocate_prop_table(GFP_KERNEL);
	if (!vprop_page) {
		its_lpi_free(bitmap, base, nr_ids);
		return -ENOMEM;
	}

	vm->db_bitmap = bitmap;
	vm->db_lpi_base = base;
	vm->nr_db_lpis = nr_ids;
	vm->vprop_page = vprop_page;

	for (i = 0; i < nr_irqs; i++) {
		vm->vpes[i]->vpe_db_lpi = base + i;
		err = its_vpe_init(vm->vpes[i]);
		if (err)
			break;
		err = its_irq_gic_domain_alloc(domain, virq + i,
					       vm->vpes[i]->vpe_db_lpi);
		if (err)
			break;
		irq_domain_set_hwirq_and_chip(domain, virq + i, i,
					      &its_vpe_irq_chip, vm->vpes[i]);
		set_bit(i, bitmap);
	}

	if (err) {
		if (i > 0)
			its_vpe_irq_domain_free(domain, virq, i);

		its_lpi_free(bitmap, base, nr_ids);
		its_free_prop_table(vprop_page);
	}

	return err;
}

static int its_vpe_irq_domain_activate(struct irq_domain *domain,
				       struct irq_data *d, bool reserve)
{
	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
	struct its_node *its;

	/* If we use the list map, we issue VMAPP on demand... */
	if (its_list_map)
		return 0;

	/* Map the VPE to the first possible CPU */
	vpe->col_idx = cpumask_first(cpu_online_mask);

	list_for_each_entry(its, &its_nodes, entry) {
		if (!its->is_v4)
			continue;

		its_send_vmapp(its, vpe, true);
		its_send_vinvall(its, vpe);
	}

	irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));

	return 0;
}

static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
					  struct irq_data *d)
{
	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
	struct its_node *its;

	/*
	 * If we use the list map, we unmap the VPE once no VLPIs are
	 * associated with the VM.
	 */
	if (its_list_map)
		return;

	list_for_each_entry(its, &its_nodes, entry) {
		if (!its->is_v4)
			continue;

		its_send_vmapp(its, vpe, false);
	}
}

static const struct irq_domain_ops its_vpe_domain_ops = {
	.alloc			= its_vpe_irq_domain_alloc,
	.free			= its_vpe_irq_domain_free,
	.activate		= its_vpe_irq_domain_activate,
	.deactivate		= its_vpe_irq_domain_deactivate,
};

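/*
 * Drain the ITS: clear GITS_CTLR.Enabled (and ImDe) and poll the
 * Quiescent bit, giving up after roughly one second. Only a disabled and
 * quiescent ITS may have GITS_CBASER/GITS_BASER<n> rewritten.
 */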
static int its_force_quiescent(void __iomem *base)
{
	u32 count = 1000000;	/* 1s */
	u32 val;

	val = readl_relaxed(base + GITS_CTLR);
	/*
	 * GIC architecture specification requires the ITS to be both
	 * disabled and quiescent for writes to GITS_BASER<n> or
	 * GITS_CBASER to not have UNPREDICTABLE results.
	 */
	if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE))
		return 0;

	/* Disable the generation of all interrupts to this ITS */
	val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe);
	writel_relaxed(val, base + GITS_CTLR);

	/* Poll GITS_CTLR and wait until ITS becomes quiescent */
	while (1) {
		val = readl_relaxed(base + GITS_CTLR);
		if (val & GITS_CTLR_QUIESCENT)
			return 0;

		count--;
		if (!count)
			return -EBUSY;

		cpu_relax();
		udelay(1);
	}
}

static bool __maybe_unused its_enable_quirk_cavium_22375(void *data)
{
	struct its_node *its = data;

	/* erratum 22375: only alloc 8MB table size */
	its->device_ids = 0x14;		/* 20 bits, 8MB */
	its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;

	return true;
}

static bool __maybe_unused its_enable_quirk_cavium_23144(void *data)
{
	struct its_node *its = data;

	its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;

	return true;
}

static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
{
	struct its_node *its = data;

	/* On QDF2400, the size of the ITE is 16 bytes */
	its->ite_size = 16;

	return true;
}

static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev)
{
	struct its_node *its = its_dev->its;

	/*
	 * The Socionext Synquacer SoC has a so-called 'pre-ITS',
	 * which maps 32-bit writes targeted at a separate window of
	 * size '4 << device_id_bits' onto writes to GITS_TRANSLATER
	 * with the device ID taken from bits [device_id_bits + 1:2]
	 * of the window offset.
	 */
	return its->pre_its_base + (its_dev->device_id << 2);
}
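
/*
 * Illustration (with a made-up base address): for a pre-ITS window at
 * 0x58000000, a device with ID 0x10 is handed 0x58000000 + (0x10 << 2) =
 * 0x58000040 as its doorbell, and the pre-ITS recovers ID 0x10 from
 * offset 0x40 of the window.
 */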

static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data)
{
	struct its_node *its = data;
	u32 pre_its_window[2];
	u32 ids;

	if (!fwnode_property_read_u32_array(its->fwnode_handle,
					    "socionext,synquacer-pre-its",
					    pre_its_window,
					    ARRAY_SIZE(pre_its_window))) {

		its->pre_its_base = pre_its_window[0];
		its->get_msi_base = its_irq_get_msi_base_pre_its;

		ids = ilog2(pre_its_window[1]) - 2;
		if (its->device_ids > ids)
			its->device_ids = ids;

		/* the pre-ITS breaks isolation, so disable MSI remapping */
		its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP;
		return true;
	}
	return false;
}

static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data)
{
	struct its_node *its = data;

	/*
	 * Hip07 insists on using the wrong address for the VLPI
	 * page. Trick it into doing the right thing...
	 */
	its->vlpi_redist_offset = SZ_128K;
	return true;
}

static const struct gic_quirk its_quirks[] = {
#ifdef CONFIG_CAVIUM_ERRATUM_22375
	{
		.desc	= "ITS: Cavium errata 22375, 24313",
		.iidr	= 0xa100034c,	/* ThunderX pass 1.x */
		.mask	= 0xffff0fff,
		.init	= its_enable_quirk_cavium_22375,
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23144
	{
		.desc	= "ITS: Cavium erratum 23144",
		.iidr	= 0xa100034c,	/* ThunderX pass 1.x */
		.mask	= 0xffff0fff,
		.init	= its_enable_quirk_cavium_23144,
	},
#endif
#ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065
	{
		.desc	= "ITS: QDF2400 erratum 0065",
		.iidr	= 0x00001070,	/* QDF2400 ITS rev 1.x */
		.mask	= 0xffffffff,
		.init	= its_enable_quirk_qdf2400_e0065,
	},
#endif
#ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS
	{
		/*
		 * The Socionext Synquacer SoC incorporates ARM's own GIC-500
		 * implementation, but with a 'pre-ITS' added that requires
		 * special handling in software.
		 */
		.desc	= "ITS: Socionext Synquacer pre-ITS",
		.iidr	= 0x0001143b,
		.mask	= 0xffffffff,
		.init	= its_enable_quirk_socionext_synquacer,
	},
#endif
#ifdef CONFIG_HISILICON_ERRATUM_161600802
	{
		.desc	= "ITS: Hip07 erratum 161600802",
		.iidr	= 0x00000004,
		.mask	= 0xffffffff,
		.init	= its_enable_quirk_hip07_161600802,
	},
#endif
	{
	}
};

static void its_enable_quirks(struct its_node *its)
{
	u32 iidr = readl_relaxed(its->base + GITS_IIDR);

	gic_enable_quirks(iidr, its_quirks, its);
}

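/*
 * syscore suspend/resume hooks. Only ITSes flagged with
 * ITS_FLAGS_SAVE_SUSPEND_STATE (set at probe time when GITS_TYPER.HCC is
 * non-zero) are handled: GITS_CTLR and GITS_CBASER are saved after
 * forcing the ITS quiescent, and restored, together with the cached
 * GITS_BASER<n> values, on resume.
 */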
static int its_save_disable(void)
{
	struct its_node *its;
	int err = 0;

	raw_spin_lock(&its_lock);
	list_for_each_entry(its, &its_nodes, entry) {
		void __iomem *base;

		if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
			continue;

		base = its->base;
		its->ctlr_save = readl_relaxed(base + GITS_CTLR);
		err = its_force_quiescent(base);
		if (err) {
			pr_err("ITS@%pa: failed to quiesce: %d\n",
			       &its->phys_base, err);
			writel_relaxed(its->ctlr_save, base + GITS_CTLR);
			goto err;
		}

		its->cbaser_save = gits_read_cbaser(base + GITS_CBASER);
	}

err:
	if (err) {
		list_for_each_entry_continue_reverse(its, &its_nodes, entry) {
			void __iomem *base;

			if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
				continue;

			base = its->base;
			writel_relaxed(its->ctlr_save, base + GITS_CTLR);
		}
	}
	raw_spin_unlock(&its_lock);

	return err;
}

static void its_restore_enable(void)
{
	struct its_node *its;
	int ret;

	raw_spin_lock(&its_lock);
	list_for_each_entry(its, &its_nodes, entry) {
		void __iomem *base;
		int i;

		if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
			continue;

		base = its->base;

		/*
		 * Make sure that the ITS is disabled. If it fails to quiesce,
		 * don't restore it since writing to CBASER or BASER<n>
		 * registers is undefined according to the GIC v3 ITS
		 * Specification.
		 */
		ret = its_force_quiescent(base);
		if (ret) {
			pr_err("ITS@%pa: failed to quiesce on resume: %d\n",
			       &its->phys_base, ret);
			continue;
		}

		gits_write_cbaser(its->cbaser_save, base + GITS_CBASER);

		/*
		 * Writing CBASER resets CREADR to 0, so make CWRITER and
		 * cmd_write line up with it.
		 */
		its->cmd_write = its->cmd_base;
		gits_write_cwriter(0, base + GITS_CWRITER);

		/* Restore GITS_BASER from the value cache. */
		for (i = 0; i < GITS_BASER_NR_REGS; i++) {
			struct its_baser *baser = &its->tables[i];

			if (!(baser->val & GITS_BASER_VALID))
				continue;

			its_write_baser(its, baser, baser->val);
		}
		writel_relaxed(its->ctlr_save, base + GITS_CTLR);

		/*
		 * Reinit the collection if it is stored in the ITS. This is
		 * indicated by col_id being less than the HCC field
		 * (CID < HCC), as specified in the GICv3 architecture.
		 */
		if (its->collections[smp_processor_id()].col_id <
		    GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER)))
			its_cpu_init_collection(its);
	}
	raw_spin_unlock(&its_lock);
}

static struct syscore_ops its_syscore_ops = {
	.suspend = its_save_disable,
	.resume = its_restore_enable,
};

static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
{
	struct irq_domain *inner_domain;
	struct msi_domain_info *info;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its);
	if (!inner_domain) {
		kfree(info);
		return -ENOMEM;
	}

	inner_domain->parent = its_parent;
	irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
	inner_domain->flags |= its->msi_domain_flags;
	info->ops = &its_msi_domain_ops;
	info->data = its;
	inner_domain->host_data = info;

	return 0;
}

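/*
 * Without DirectLPI, doorbells have to be signalled with ITS commands.
 * Set up the proxy its_device used for that: it carries one event per
 * possible CPU (rounded up to a power of two) on the last DevID the
 * first ITS in the list supports.
 */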
static int its_init_vpe_domain(void)
{
	struct its_node *its;
	u32 devid;
	int entries;

	if (gic_rdists->has_direct_lpi) {
		pr_info("ITS: Using DirectLPI for VPE invalidation\n");
		return 0;
	}

	/* Any ITS will do, even if not v4 */
	its = list_first_entry(&its_nodes, struct its_node, entry);

	entries = roundup_pow_of_two(nr_cpu_ids);
	vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes),
				 GFP_KERNEL);
	if (!vpe_proxy.vpes) {
		pr_err("ITS: Can't allocate GICv4 proxy device array\n");
		return -ENOMEM;
	}

	/* Use the last possible DevID */
	devid = GENMASK(its->device_ids - 1, 0);
	vpe_proxy.dev = its_create_device(its, devid, entries, false);
	if (!vpe_proxy.dev) {
		kfree(vpe_proxy.vpes);
		pr_err("ITS: Can't allocate GICv4 proxy device\n");
		return -ENOMEM;
	}

	BUG_ON(entries > vpe_proxy.dev->nr_ites);

	raw_spin_lock_init(&vpe_proxy.lock);
	vpe_proxy.next_victim = 0;
	pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n",
		devid, vpe_proxy.dev->nr_ites);

	return 0;
}

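/*
 * On GICv4 systems where the ITSes cannot propagate VMOVP themselves
 * (GITS_TYPER.VMOVP is clear), each ITS needs a unique ITSList number.
 * Pick a free slot, program it into the ITS_NUMBER field of GITS_CTLR,
 * and fall back to whatever value reads back if the field turns out to
 * be read-only.
 */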
static int __init its_compute_its_list_map(struct resource *res,
					   void __iomem *its_base)
{
	int its_number;
	u32 ctlr;

	/*
	 * This is assumed to be done early enough that we're
	 * guaranteed to be single-threaded, hence no
	 * locking. Should this change, we should address
	 * this.
	 */
	its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX);
	if (its_number >= GICv4_ITS_LIST_MAX) {
		pr_err("ITS@%pa: No ITSList entry available!\n",
		       &res->start);
		return -EINVAL;
	}

	ctlr = readl_relaxed(its_base + GITS_CTLR);
	ctlr &= ~GITS_CTLR_ITS_NUMBER;
	ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT;
	writel_relaxed(ctlr, its_base + GITS_CTLR);
	ctlr = readl_relaxed(its_base + GITS_CTLR);
	if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) {
		its_number = ctlr & GITS_CTLR_ITS_NUMBER;
		its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT;
	}

	if (test_and_set_bit(its_number, &its_list_map)) {
		pr_err("ITS@%pa: Duplicate ITSList entry %d\n",
		       &res->start, its_number);
		return -EINVAL;
	}

	return its_number;
}

static int __init its_probe_one(struct resource *res,
				struct fwnode_handle *handle, int numa_node)
{
	struct its_node *its;
	void __iomem *its_base;
	u32 val, ctlr;
	u64 baser, tmp, typer;
	int err;

	its_base = ioremap(res->start, resource_size(res));
	if (!its_base) {
		pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
		return -ENOMEM;
	}

	val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
	if (val != 0x30 && val != 0x40) {
		pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start);
		err = -ENODEV;
		goto out_unmap;
	}

	err = its_force_quiescent(its_base);
	if (err) {
		pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start);
		goto out_unmap;
	}

	pr_info("ITS %pR\n", res);

	its = kzalloc(sizeof(*its), GFP_KERNEL);
	if (!its) {
		err = -ENOMEM;
		goto out_unmap;
	}

	raw_spin_lock_init(&its->lock);
	INIT_LIST_HEAD(&its->entry);
	INIT_LIST_HEAD(&its->its_device_list);
	typer = gic_read_typer(its_base + GITS_TYPER);
	its->base = its_base;
	its->phys_base = res->start;
	its->ite_size = GITS_TYPER_ITT_ENTRY_SIZE(typer);
	its->device_ids = GITS_TYPER_DEVBITS(typer);
	its->is_v4 = !!(typer & GITS_TYPER_VLPIS);
	if (its->is_v4) {
		if (!(typer & GITS_TYPER_VMOVP)) {
			err = its_compute_its_list_map(res, its_base);
			if (err < 0)
				goto out_free_its;

			its->list_nr = err;

			pr_info("ITS@%pa: Using ITS number %d\n",
				&res->start, err);
		} else {
			pr_info("ITS@%pa: Single VMOVP capable\n", &res->start);
		}
	}

	its->numa_node = numa_node;

	its->cmd_base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						 get_order(ITS_CMD_QUEUE_SZ));
	if (!its->cmd_base) {
		err = -ENOMEM;
		goto out_free_its;
	}
	its->cmd_write = its->cmd_base;
	its->fwnode_handle = handle;
	its->get_msi_base = its_irq_get_msi_base;
	its->msi_domain_flags = IRQ_DOMAIN_FLAG_MSI_REMAP;

	its_enable_quirks(its);

	err = its_alloc_tables(its);
	if (err)
		goto out_free_cmd;

	err = its_alloc_collections(its);
	if (err)
		goto out_free_tables;

	baser = (virt_to_phys(its->cmd_base)	|
		 GITS_CBASER_RaWaWb		|
		 GITS_CBASER_InnerShareable	|
		 (ITS_CMD_QUEUE_SZ / SZ_4K - 1)	|
		 GITS_CBASER_VALID);

	gits_write_cbaser(baser, its->base + GITS_CBASER);
	tmp = gits_read_cbaser(its->base + GITS_CBASER);

	if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
		if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
			/*
			 * The HW reports non-shareable, we must
			 * remove the cacheability attributes as
			 * well.
			 */
			baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
				   GITS_CBASER_CACHEABILITY_MASK);
			baser |= GITS_CBASER_nC;
			gits_write_cbaser(baser, its->base + GITS_CBASER);
		}
		pr_info("ITS: using cache flushing for cmd queue\n");
		its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
	}

	gits_write_cwriter(0, its->base + GITS_CWRITER);
	ctlr = readl_relaxed(its->base + GITS_CTLR);
	ctlr |= GITS_CTLR_ENABLE;
	if (its->is_v4)
		ctlr |= GITS_CTLR_ImDe;
	writel_relaxed(ctlr, its->base + GITS_CTLR);

	if (GITS_TYPER_HCC(typer))
		its->flags |= ITS_FLAGS_SAVE_SUSPEND_STATE;

	err = its_init_domain(handle, its);
	if (err)
		goto out_free_tables;

	raw_spin_lock(&its_lock);
	list_add(&its->entry, &its_nodes);
	raw_spin_unlock(&its_lock);

	return 0;

out_free_tables:
	its_free_tables(its);
out_free_cmd:
	free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
out_free_its:
	kfree(its);
out_unmap:
	iounmap(its_base);
	pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err);
	return err;
}

static bool gic_rdists_supports_plpis(void)
{
	return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
}

static int redist_disable_lpis(void)
{
	void __iomem *rbase = gic_data_rdist_rd_base();
	u64 timeout = USEC_PER_SEC;
	u64 val;

	if (!gic_rdists_supports_plpis()) {
		pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
		return -ENXIO;
	}

	val = readl_relaxed(rbase + GICR_CTLR);
	if (!(val & GICR_CTLR_ENABLE_LPIS))
		return 0;

	/*
	 * If coming via a CPU hotplug event, we don't need to disable
	 * LPIs before trying to re-enable them. They are already
	 * configured and all is well in the world.
	 *
	 * If running with preallocated tables, there is nothing to do.
	 */
	if (gic_data_rdist()->lpi_enabled ||
	    (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED))
		return 0;

	/*
	 * From that point on, we only try to do some damage control.
	 */
	pr_warn("GICv3: CPU%d: Booted with LPIs enabled, memory probably corrupted\n",
		smp_processor_id());
	add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);

	/* Disable LPIs */
	val &= ~GICR_CTLR_ENABLE_LPIS;
	writel_relaxed(val, rbase + GICR_CTLR);

	/* Make sure any change to GICR_CTLR is observable by the GIC */
	dsb(sy);

	/*
	 * Software must observe RWP==0 after clearing GICR_CTLR.EnableLPIs
	 * from 1 to 0 before programming GICR_PEND{PROP}BASER registers.
	 * Error out if we time out waiting for RWP to clear.
	 */
	while (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_RWP) {
		if (!timeout) {
			pr_err("CPU%d: Timeout while disabling LPIs\n",
			       smp_processor_id());
			return -ETIMEDOUT;
		}
		udelay(1);
		timeout--;
	}

	/*
	 * After it has been written to 1, it is IMPLEMENTATION
	 * DEFINED whether GICR_CTLR.EnableLPI becomes RES1 or can be
	 * cleared to 0. Error out if clearing the bit failed.
	 */
	if (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS) {
		pr_err("CPU%d: Failed to disable LPIs\n", smp_processor_id());
		return -EBUSY;
	}

	return 0;
}

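/*
 * Per-CPU bring-up, called from the GICv3 core once the redistributor
 * for this CPU is known: make sure LPIs are in a sane (disabled) state,
 * then program the LPI tables and map this CPU's collection on every ITS.
 */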
int its_cpu_init(void)
{
	if (!list_empty(&its_nodes)) {
		int ret;

		ret = redist_disable_lpis();
		if (ret)
			return ret;

		its_cpu_init_lpis();
		its_cpu_init_collections();
	}

	return 0;
}

static const struct of_device_id its_device_id[] = {
	{	.compatible	= "arm,gic-v3-its",	},
	{},
};

static int __init its_of_probe(struct device_node *node)
{
	struct device_node *np;
	struct resource res;

	for (np = of_find_matching_node(node, its_device_id); np;
	     np = of_find_matching_node(np, its_device_id)) {
		if (!of_device_is_available(np))
			continue;
		if (!of_property_read_bool(np, "msi-controller")) {
			pr_warn("%pOF: no msi-controller property, ITS ignored\n",
				np);
			continue;
		}

		if (of_address_to_resource(np, 0, &res)) {
			pr_warn("%pOF: no regs?\n", np);
			continue;
		}

		its_probe_one(&res, &np->fwnode, of_node_to_nid(np));
	}
	return 0;
}

#ifdef CONFIG_ACPI

#define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K)

#ifdef CONFIG_ACPI_NUMA
struct its_srat_map {
	/* numa node id */
	u32	numa_node;
	/* GIC ITS ID */
	u32	its_id;
};

static struct its_srat_map *its_srat_maps __initdata;
static int its_in_srat __initdata;

static int __init acpi_get_its_numa_node(u32 its_id)
{
	int i;

	for (i = 0; i < its_in_srat; i++) {
		if (its_id == its_srat_maps[i].its_id)
			return its_srat_maps[i].numa_node;
	}
	return NUMA_NO_NODE;
}

static int __init gic_acpi_match_srat_its(struct acpi_subtable_header *header,
					  const unsigned long end)
{
	return 0;
}

static int __init gic_acpi_parse_srat_its(struct acpi_subtable_header *header,
					  const unsigned long end)
{
	int node;
	struct acpi_srat_gic_its_affinity *its_affinity;

	its_affinity = (struct acpi_srat_gic_its_affinity *)header;
	if (!its_affinity)
		return -EINVAL;

	if (its_affinity->header.length < sizeof(*its_affinity)) {
		pr_err("SRAT: Invalid header length %d in ITS affinity\n",
			its_affinity->header.length);
		return -EINVAL;
	}

	node = acpi_map_pxm_to_node(its_affinity->proximity_domain);

	if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
		pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node);
		return 0;
	}

	its_srat_maps[its_in_srat].numa_node = node;
	its_srat_maps[its_in_srat].its_id = its_affinity->its_id;
	its_in_srat++;
	pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n",
		its_affinity->proximity_domain, its_affinity->its_id, node);

	return 0;
}

static void __init acpi_table_parse_srat_its(void)
{
	int count;

	count = acpi_table_parse_entries(ACPI_SIG_SRAT,
			sizeof(struct acpi_table_srat),
			ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
			gic_acpi_match_srat_its, 0);
	if (count <= 0)
		return;

	its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map),
				      GFP_KERNEL);
	if (!its_srat_maps) {
		pr_warn("SRAT: Failed to allocate memory for its_srat_maps!\n");
		return;
	}

	acpi_table_parse_entries(ACPI_SIG_SRAT,
			sizeof(struct acpi_table_srat),
			ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
			gic_acpi_parse_srat_its, 0);
}

/* free the its_srat_maps after ITS probing */
static void __init acpi_its_srat_maps_free(void)
{
	kfree(its_srat_maps);
}
#else
static void __init acpi_table_parse_srat_its(void)	{ }
static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; }
static void __init acpi_its_srat_maps_free(void) { }
#endif

static int __init gic_acpi_parse_madt_its(struct acpi_subtable_header *header,
					  const unsigned long end)
{
	struct acpi_madt_generic_translator *its_entry;
	struct fwnode_handle *dom_handle;
	struct resource res;
	int err;

	its_entry = (struct acpi_madt_generic_translator *)header;
	memset(&res, 0, sizeof(res));
	res.start = its_entry->base_address;
	res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1;
	res.flags = IORESOURCE_MEM;

	dom_handle = irq_domain_alloc_fwnode((void *)its_entry->base_address);
	if (!dom_handle) {
		pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n",
		       &res.start);
		return -ENOMEM;
	}

	err = iort_register_domain_token(its_entry->translation_id, res.start,
					 dom_handle);
	if (err) {
		pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n",
		       &res.start, its_entry->translation_id);
		goto dom_err;
	}

	err = its_probe_one(&res, dom_handle,
			acpi_get_its_numa_node(its_entry->translation_id));
	if (!err)
		return 0;

	iort_deregister_domain_token(its_entry->translation_id);
dom_err:
	irq_domain_free_fwnode(dom_handle);
	return err;
}

static void __init its_acpi_probe(void)
{
	acpi_table_parse_srat_its();
	acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
			      gic_acpi_parse_madt_its, 0);
	acpi_its_srat_maps_free();
}
#else
static void __init its_acpi_probe(void) { }
#endif

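/*
 * Top-level entry point, called from the GICv3 driver: discover the
 * ITSes from DT or ACPI, allocate the LPI tables, and bring up the GICv4
 * vPE infrastructure when both the ITSes and the redistributors support
 * it.
 */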
int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
		    struct irq_domain *parent_domain)
{
	struct device_node *of_node;
	struct its_node *its;
	bool has_v4 = false;
	int err;

	its_parent = parent_domain;
	of_node = to_of_node(handle);
	if (of_node)
		its_of_probe(of_node);
	else
		its_acpi_probe();

	if (list_empty(&its_nodes)) {
		pr_warn("ITS: No ITS available, not enabling LPIs\n");
		return -ENXIO;
	}

	gic_rdists = rdists;

	err = allocate_lpi_tables();
	if (err)
		return err;

	list_for_each_entry(its, &its_nodes, entry)
		has_v4 |= its->is_v4;

	if (has_v4 && rdists->has_vlpis) {
		if (its_init_vpe_domain() ||
		    its_init_v4(parent_domain, &its_vpe_domain_ops)) {
			rdists->has_vlpis = false;
			pr_err("ITS: Disabling GICv4 support\n");
		}
	}

	register_syscore_ops(&its_syscore_ops);

	return 0;
}