blob: 9f26445eee4a2b1ec199d4f4b957809d71df2bff [file] [log] [blame]
Marc Zyngiercc2d3212014-11-24 14:35:11 +00001/*
Marc Zyngierd7276b82016-12-20 15:11:47 +00002 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
Marc Zyngiercc2d3212014-11-24 14:35:11 +00003 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
Tomasz Nowicki3f010cf2016-09-12 20:32:25 +020018#include <linux/acpi.h>
Hanjun Guo8d3554b2017-03-07 20:39:59 +080019#include <linux/acpi_iort.h>
Marc Zyngiercc2d3212014-11-24 14:35:11 +000020#include <linux/bitmap.h>
21#include <linux/cpu.h>
22#include <linux/delay.h>
Robin Murphy44bb7e22016-09-12 17:13:59 +010023#include <linux/dma-iommu.h>
Marc Zyngiercc2d3212014-11-24 14:35:11 +000024#include <linux/interrupt.h>
Tomasz Nowicki3f010cf2016-09-12 20:32:25 +020025#include <linux/irqdomain.h>
Marc Zyngier880cb3c2018-05-27 16:14:15 +010026#include <linux/list.h>
27#include <linux/list_sort.h>
Marc Zyngiercc2d3212014-11-24 14:35:11 +000028#include <linux/log2.h>
29#include <linux/mm.h>
30#include <linux/msi.h>
31#include <linux/of.h>
32#include <linux/of_address.h>
33#include <linux/of_irq.h>
34#include <linux/of_pci.h>
35#include <linux/of_platform.h>
36#include <linux/percpu.h>
37#include <linux/slab.h>
Derek Basehoredba0bc72018-02-28 21:48:18 -080038#include <linux/syscore_ops.h>
Marc Zyngiercc2d3212014-11-24 14:35:11 +000039
Joel Porquet41a83e062015-07-07 17:11:46 -040040#include <linux/irqchip.h>
Marc Zyngiercc2d3212014-11-24 14:35:11 +000041#include <linux/irqchip/arm-gic-v3.h>
Marc Zyngierc808eea2016-12-20 09:31:20 +000042#include <linux/irqchip/arm-gic-v4.h>
Marc Zyngiercc2d3212014-11-24 14:35:11 +000043
Marc Zyngiercc2d3212014-11-24 14:35:11 +000044#include <asm/cputype.h>
45#include <asm/exception.h>
46
Robert Richter67510cc2015-09-21 22:58:37 +020047#include "irq-gic-common.h"
48
Robert Richter94100972015-09-21 22:58:38 +020049#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0)
50#define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1)
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +020051#define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2)
Derek Basehoredba0bc72018-02-28 21:48:18 -080052#define ITS_FLAGS_SAVE_SUSPEND_STATE (1ULL << 3)
Marc Zyngiercc2d3212014-11-24 14:35:11 +000053
Marc Zyngierc48ed512014-11-24 14:35:12 +000054#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0)
55
Marc Zyngiera13b0402016-12-19 17:15:24 +000056static u32 lpi_id_bits;
57
58/*
59 * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
60 * deal with (one configuration byte per interrupt). PENDBASE has to
61 * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
62 */
63#define LPI_NRBITS lpi_id_bits
64#define LPI_PROPBASE_SZ ALIGN(BIT(LPI_NRBITS), SZ_64K)
65#define LPI_PENDBASE_SZ ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
66
67#define LPI_PROP_DEFAULT_PRIO 0xa0
68
Marc Zyngiercc2d3212014-11-24 14:35:11 +000069/*
70 * Collection structure - just an ID, and a redistributor address to
71 * ping. We use one per CPU as a bag of interrupts assigned to this
72 * CPU.
73 */
74struct its_collection {
75 u64 target_address;
76 u16 col_id;
77};
78
79/*
Shanker Donthineni93473592016-06-06 18:17:30 -050080 * The ITS_BASER structure - contains memory information, cached
81 * value of BASER register configuration and ITS page size.
Shanker Donthineni466b7d12016-03-09 22:10:49 -060082 */
83struct its_baser {
84 void *base;
85 u64 val;
86 u32 order;
Shanker Donthineni93473592016-06-06 18:17:30 -050087 u32 psz;
Shanker Donthineni466b7d12016-03-09 22:10:49 -060088};
89
Ard Biesheuvel558b0162017-10-17 17:55:56 +010090struct its_device;
91
Shanker Donthineni466b7d12016-03-09 22:10:49 -060092/*
Marc Zyngiercc2d3212014-11-24 14:35:11 +000093 * The ITS structure - contains most of the infrastructure, with the
Marc Zyngier841514a2015-07-28 14:46:20 +010094 * top-level MSI domain, the command queue, the collections, and the
95 * list of devices writing to it.
Marc Zyngiercc2d3212014-11-24 14:35:11 +000096 */
97struct its_node {
98 raw_spinlock_t lock;
99 struct list_head entry;
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000100 void __iomem *base;
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +0200101 phys_addr_t phys_base;
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000102 struct its_cmd_block *cmd_base;
103 struct its_cmd_block *cmd_write;
Shanker Donthineni466b7d12016-03-09 22:10:49 -0600104 struct its_baser tables[GITS_BASER_NR_REGS];
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000105 struct its_collection *collections;
Ard Biesheuvel558b0162017-10-17 17:55:56 +0100106 struct fwnode_handle *fwnode_handle;
107 u64 (*get_msi_base)(struct its_device *its_dev);
Derek Basehoredba0bc72018-02-28 21:48:18 -0800108 u64 cbaser_save;
109 u32 ctlr_save;
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000110 struct list_head its_device_list;
111 u64 flags;
Marc Zyngierdebf6d02017-10-08 18:44:42 +0100112 unsigned long list_nr;
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000113 u32 ite_size;
Shanker Donthineni466b7d12016-03-09 22:10:49 -0600114 u32 device_ids;
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +0200115 int numa_node;
Ard Biesheuvel558b0162017-10-17 17:55:56 +0100116 unsigned int msi_domain_flags;
117 u32 pre_its_base; /* for Socionext Synquacer */
Marc Zyngier3dfa5762016-12-19 17:25:54 +0000118 bool is_v4;
Marc Zyngier5c9a8822017-07-28 21:20:37 +0100119 int vlpi_redist_offset;
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000120};
121
122#define ITS_ITT_ALIGN SZ_256
123
Shanker Donthineni32bd44d2017-10-07 15:43:48 -0500124/* The maximum number of VPEID bits supported by VLPI commands */
125#define ITS_MAX_VPEID_BITS (16)
126#define ITS_MAX_VPEID (1 << (ITS_MAX_VPEID_BITS))
127
Shanker Donthineni2eca0d62016-02-16 18:00:36 -0600128/* Convert page order to size in bytes */
129#define PAGE_ORDER_TO_SIZE(o) (PAGE_SIZE << (o))
130
Marc Zyngier591e5be2015-07-17 10:46:42 +0100131struct event_lpi_map {
132 unsigned long *lpi_map;
133 u16 *col_map;
134 irq_hw_number_t lpi_base;
135 int nr_lpis;
Marc Zyngierd011e4e2016-12-20 09:44:41 +0000136 struct mutex vlpi_lock;
137 struct its_vm *vm;
138 struct its_vlpi_map *vlpi_maps;
139 int nr_vlpis;
Marc Zyngier591e5be2015-07-17 10:46:42 +0100140};
141
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000142/*
Marc Zyngierd011e4e2016-12-20 09:44:41 +0000143 * The ITS view of a device - belongs to an ITS, owns an interrupt
144 * translation table, and a list of interrupts. If it some of its
145 * LPIs are injected into a guest (GICv4), the event_map.vm field
146 * indicates which one.
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000147 */
148struct its_device {
149 struct list_head entry;
150 struct its_node *its;
Marc Zyngier591e5be2015-07-17 10:46:42 +0100151 struct event_lpi_map event_map;
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000152 void *itt;
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000153 u32 nr_ites;
154 u32 device_id;
155};
156
Marc Zyngier20b3d542016-12-20 15:23:22 +0000157static struct {
158 raw_spinlock_t lock;
159 struct its_device *dev;
160 struct its_vpe **vpes;
161 int next_victim;
162} vpe_proxy;
163
Marc Zyngier1ac19ca2014-11-24 14:35:14 +0000164static LIST_HEAD(its_nodes);
Sebastian Andrzej Siewiora8db7452018-07-18 17:42:04 +0200165static DEFINE_RAW_SPINLOCK(its_lock);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +0000166static struct rdists *gic_rdists;
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +0200167static struct irq_domain *its_parent;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +0000168
Marc Zyngier3dfa5762016-12-19 17:25:54 +0000169static unsigned long its_list_map;
Marc Zyngier3171a472016-12-20 15:17:28 +0000170static u16 vmovp_seq_num;
171static DEFINE_RAW_SPINLOCK(vmovp_lock);
172
Marc Zyngier7d75bbb2016-12-20 13:55:54 +0000173static DEFINE_IDA(its_vpeid_ida);
Marc Zyngier3dfa5762016-12-19 17:25:54 +0000174
Marc Zyngier1ac19ca2014-11-24 14:35:14 +0000175#define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist))
Marc Zyngier11e37d32018-07-27 13:38:54 +0100176#define gic_data_rdist_cpu(cpu) (per_cpu_ptr(gic_rdists->rdist, cpu))
Marc Zyngier1ac19ca2014-11-24 14:35:14 +0000177#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
Marc Zyngiere643d802016-12-20 15:09:31 +0000178#define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K)
Marc Zyngier1ac19ca2014-11-24 14:35:14 +0000179
Marc Zyngier591e5be2015-07-17 10:46:42 +0100180static struct its_collection *dev_event_to_col(struct its_device *its_dev,
181 u32 event)
182{
183 struct its_node *its = its_dev->its;
184
185 return its->collections + its_dev->event_map.col_map[event];
186}
187
Marc Zyngier83559b42018-06-22 10:52:52 +0100188static struct its_collection *valid_col(struct its_collection *col)
189{
190 if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(0, 15)))
191 return NULL;
192
193 return col;
194}
195
Marc Zyngier205e0652018-06-22 10:52:53 +0100196static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
197{
198 if (valid_col(its->collections + vpe->col_idx))
199 return vpe;
200
201 return NULL;
202}
203
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000204/*
205 * ITS command descriptors - parameters to be encoded in a command
206 * block.
207 */
208struct its_cmd_desc {
209 union {
210 struct {
211 struct its_device *dev;
212 u32 event_id;
213 } its_inv_cmd;
214
215 struct {
216 struct its_device *dev;
217 u32 event_id;
Marc Zyngier8d85dce2016-12-19 18:02:13 +0000218 } its_clear_cmd;
219
220 struct {
221 struct its_device *dev;
222 u32 event_id;
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000223 } its_int_cmd;
224
225 struct {
226 struct its_device *dev;
227 int valid;
228 } its_mapd_cmd;
229
230 struct {
231 struct its_collection *col;
232 int valid;
233 } its_mapc_cmd;
234
235 struct {
236 struct its_device *dev;
237 u32 phys_id;
238 u32 event_id;
Marc Zyngier6a25ad32016-12-20 15:52:26 +0000239 } its_mapti_cmd;
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000240
241 struct {
242 struct its_device *dev;
243 struct its_collection *col;
Marc Zyngier591e5be2015-07-17 10:46:42 +0100244 u32 event_id;
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000245 } its_movi_cmd;
246
247 struct {
248 struct its_device *dev;
249 u32 event_id;
250 } its_discard_cmd;
251
252 struct {
253 struct its_collection *col;
254 } its_invall_cmd;
Marc Zyngierd011e4e2016-12-20 09:44:41 +0000255
256 struct {
257 struct its_vpe *vpe;
Marc Zyngiereb781922016-12-20 14:47:05 +0000258 } its_vinvall_cmd;
259
260 struct {
261 struct its_vpe *vpe;
262 struct its_collection *col;
263 bool valid;
264 } its_vmapp_cmd;
265
266 struct {
267 struct its_vpe *vpe;
Marc Zyngierd011e4e2016-12-20 09:44:41 +0000268 struct its_device *dev;
269 u32 virt_id;
270 u32 event_id;
271 bool db_enabled;
272 } its_vmapti_cmd;
273
274 struct {
275 struct its_vpe *vpe;
276 struct its_device *dev;
277 u32 event_id;
278 bool db_enabled;
279 } its_vmovi_cmd;
Marc Zyngier3171a472016-12-20 15:17:28 +0000280
281 struct {
282 struct its_vpe *vpe;
283 struct its_collection *col;
284 u16 seq_num;
285 u16 its_list;
286 } its_vmovp_cmd;
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000287 };
288};
289
290/*
291 * The ITS command block, which is what the ITS actually parses.
292 */
293struct its_cmd_block {
294 u64 raw_cmd[4];
295};
296
297#define ITS_CMD_QUEUE_SZ SZ_64K
298#define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))
299
Marc Zyngier67047f902017-07-28 21:16:58 +0100300typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *,
301 struct its_cmd_block *,
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000302 struct its_cmd_desc *);
303
Marc Zyngier67047f902017-07-28 21:16:58 +0100304typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *,
305 struct its_cmd_block *,
Marc Zyngierd011e4e2016-12-20 09:44:41 +0000306 struct its_cmd_desc *);
307
Marc Zyngier4d36f132016-12-19 17:11:52 +0000308static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
309{
310 u64 mask = GENMASK_ULL(h, l);
311 *raw_cmd &= ~mask;
312 *raw_cmd |= (val << l) & mask;
313}
314
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000315static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
316{
Marc Zyngier4d36f132016-12-19 17:11:52 +0000317 its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000318}
319
320static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
321{
Marc Zyngier4d36f132016-12-19 17:11:52 +0000322 its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000323}
324
325static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
326{
Marc Zyngier4d36f132016-12-19 17:11:52 +0000327 its_mask_encode(&cmd->raw_cmd[1], id, 31, 0);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000328}
329
330static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
331{
Marc Zyngier4d36f132016-12-19 17:11:52 +0000332 its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000333}
334
335static void its_encode_size(struct its_cmd_block *cmd, u8 size)
336{
Marc Zyngier4d36f132016-12-19 17:11:52 +0000337 its_mask_encode(&cmd->raw_cmd[1], size, 4, 0);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000338}
339
340static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
341{
Shanker Donthineni30ae9612017-10-09 11:46:55 -0500342 its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000343}
344
345static void its_encode_valid(struct its_cmd_block *cmd, int valid)
346{
Marc Zyngier4d36f132016-12-19 17:11:52 +0000347 its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000348}
349
350static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
351{
Shanker Donthineni30ae9612017-10-09 11:46:55 -0500352 its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000353}
354
355static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
356{
Marc Zyngier4d36f132016-12-19 17:11:52 +0000357 its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000358}
359
Marc Zyngierd011e4e2016-12-20 09:44:41 +0000360static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
361{
362 its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
363}
364
365static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
366{
367 its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
368}
369
370static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
371{
372 its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
373}
374
375static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
376{
377 its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
378}
379
Marc Zyngier3171a472016-12-20 15:17:28 +0000380static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num)
381{
382 its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32);
383}
384
385static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
386{
387 its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0);
388}
389
Marc Zyngiereb781922016-12-20 14:47:05 +0000390static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
391{
Shanker Donthineni30ae9612017-10-09 11:46:55 -0500392 its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16);
Marc Zyngiereb781922016-12-20 14:47:05 +0000393}
394
395static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
396{
397 its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
398}
399
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000400static inline void its_fixup_cmd(struct its_cmd_block *cmd)
401{
402 /* Let's fixup BE commands */
403 cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]);
404 cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]);
405 cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]);
406 cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]);
407}
408
Marc Zyngier67047f902017-07-28 21:16:58 +0100409static struct its_collection *its_build_mapd_cmd(struct its_node *its,
410 struct its_cmd_block *cmd,
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000411 struct its_cmd_desc *desc)
412{
413 unsigned long itt_addr;
Marc Zyngierc8481262014-12-12 10:51:24 +0000414 u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000415
416 itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
417 itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);
418
419 its_encode_cmd(cmd, GITS_CMD_MAPD);
420 its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
421 its_encode_size(cmd, size - 1);
422 its_encode_itt(cmd, itt_addr);
423 its_encode_valid(cmd, desc->its_mapd_cmd.valid);
424
425 its_fixup_cmd(cmd);
426
Marc Zyngier591e5be2015-07-17 10:46:42 +0100427 return NULL;
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000428}
429
Marc Zyngier67047f902017-07-28 21:16:58 +0100430static struct its_collection *its_build_mapc_cmd(struct its_node *its,
431 struct its_cmd_block *cmd,
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000432 struct its_cmd_desc *desc)
433{
434 its_encode_cmd(cmd, GITS_CMD_MAPC);
435 its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
436 its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
437 its_encode_valid(cmd, desc->its_mapc_cmd.valid);
438
439 its_fixup_cmd(cmd);
440
441 return desc->its_mapc_cmd.col;
442}
443
Marc Zyngier67047f902017-07-28 21:16:58 +0100444static struct its_collection *its_build_mapti_cmd(struct its_node *its,
445 struct its_cmd_block *cmd,
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000446 struct its_cmd_desc *desc)
447{
Marc Zyngier591e5be2015-07-17 10:46:42 +0100448 struct its_collection *col;
449
Marc Zyngier6a25ad32016-12-20 15:52:26 +0000450 col = dev_event_to_col(desc->its_mapti_cmd.dev,
451 desc->its_mapti_cmd.event_id);
Marc Zyngier591e5be2015-07-17 10:46:42 +0100452
Marc Zyngier6a25ad32016-12-20 15:52:26 +0000453 its_encode_cmd(cmd, GITS_CMD_MAPTI);
454 its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id);
455 its_encode_event_id(cmd, desc->its_mapti_cmd.event_id);
456 its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id);
Marc Zyngier591e5be2015-07-17 10:46:42 +0100457 its_encode_collection(cmd, col->col_id);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000458
459 its_fixup_cmd(cmd);
460
Marc Zyngier83559b42018-06-22 10:52:52 +0100461 return valid_col(col);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000462}
463
Marc Zyngier67047f902017-07-28 21:16:58 +0100464static struct its_collection *its_build_movi_cmd(struct its_node *its,
465 struct its_cmd_block *cmd,
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000466 struct its_cmd_desc *desc)
467{
Marc Zyngier591e5be2015-07-17 10:46:42 +0100468 struct its_collection *col;
469
470 col = dev_event_to_col(desc->its_movi_cmd.dev,
471 desc->its_movi_cmd.event_id);
472
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000473 its_encode_cmd(cmd, GITS_CMD_MOVI);
474 its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
Marc Zyngier591e5be2015-07-17 10:46:42 +0100475 its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000476 its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);
477
478 its_fixup_cmd(cmd);
479
Marc Zyngier83559b42018-06-22 10:52:52 +0100480 return valid_col(col);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000481}
482
Marc Zyngier67047f902017-07-28 21:16:58 +0100483static struct its_collection *its_build_discard_cmd(struct its_node *its,
484 struct its_cmd_block *cmd,
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000485 struct its_cmd_desc *desc)
486{
Marc Zyngier591e5be2015-07-17 10:46:42 +0100487 struct its_collection *col;
488
489 col = dev_event_to_col(desc->its_discard_cmd.dev,
490 desc->its_discard_cmd.event_id);
491
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000492 its_encode_cmd(cmd, GITS_CMD_DISCARD);
493 its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
494 its_encode_event_id(cmd, desc->its_discard_cmd.event_id);
495
496 its_fixup_cmd(cmd);
497
Marc Zyngier83559b42018-06-22 10:52:52 +0100498 return valid_col(col);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000499}
500
Marc Zyngier67047f902017-07-28 21:16:58 +0100501static struct its_collection *its_build_inv_cmd(struct its_node *its,
502 struct its_cmd_block *cmd,
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000503 struct its_cmd_desc *desc)
504{
Marc Zyngier591e5be2015-07-17 10:46:42 +0100505 struct its_collection *col;
506
507 col = dev_event_to_col(desc->its_inv_cmd.dev,
508 desc->its_inv_cmd.event_id);
509
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000510 its_encode_cmd(cmd, GITS_CMD_INV);
511 its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
512 its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
513
514 its_fixup_cmd(cmd);
515
Marc Zyngier83559b42018-06-22 10:52:52 +0100516 return valid_col(col);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000517}
518
Marc Zyngier67047f902017-07-28 21:16:58 +0100519static struct its_collection *its_build_int_cmd(struct its_node *its,
520 struct its_cmd_block *cmd,
Marc Zyngier8d85dce2016-12-19 18:02:13 +0000521 struct its_cmd_desc *desc)
522{
523 struct its_collection *col;
524
525 col = dev_event_to_col(desc->its_int_cmd.dev,
526 desc->its_int_cmd.event_id);
527
528 its_encode_cmd(cmd, GITS_CMD_INT);
529 its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
530 its_encode_event_id(cmd, desc->its_int_cmd.event_id);
531
532 its_fixup_cmd(cmd);
533
Marc Zyngier83559b42018-06-22 10:52:52 +0100534 return valid_col(col);
Marc Zyngier8d85dce2016-12-19 18:02:13 +0000535}
536
Marc Zyngier67047f902017-07-28 21:16:58 +0100537static struct its_collection *its_build_clear_cmd(struct its_node *its,
538 struct its_cmd_block *cmd,
Marc Zyngier8d85dce2016-12-19 18:02:13 +0000539 struct its_cmd_desc *desc)
540{
541 struct its_collection *col;
542
543 col = dev_event_to_col(desc->its_clear_cmd.dev,
544 desc->its_clear_cmd.event_id);
545
546 its_encode_cmd(cmd, GITS_CMD_CLEAR);
547 its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
548 its_encode_event_id(cmd, desc->its_clear_cmd.event_id);
549
550 its_fixup_cmd(cmd);
551
Marc Zyngier83559b42018-06-22 10:52:52 +0100552 return valid_col(col);
Marc Zyngier8d85dce2016-12-19 18:02:13 +0000553}
554
Marc Zyngier67047f902017-07-28 21:16:58 +0100555static struct its_collection *its_build_invall_cmd(struct its_node *its,
556 struct its_cmd_block *cmd,
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000557 struct its_cmd_desc *desc)
558{
559 its_encode_cmd(cmd, GITS_CMD_INVALL);
560 its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
561
562 its_fixup_cmd(cmd);
563
564 return NULL;
565}
566
Marc Zyngier67047f902017-07-28 21:16:58 +0100567static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
568 struct its_cmd_block *cmd,
Marc Zyngiereb781922016-12-20 14:47:05 +0000569 struct its_cmd_desc *desc)
570{
571 its_encode_cmd(cmd, GITS_CMD_VINVALL);
572 its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);
573
574 its_fixup_cmd(cmd);
575
Marc Zyngier205e0652018-06-22 10:52:53 +0100576 return valid_vpe(its, desc->its_vinvall_cmd.vpe);
Marc Zyngiereb781922016-12-20 14:47:05 +0000577}
578
Marc Zyngier67047f902017-07-28 21:16:58 +0100579static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
580 struct its_cmd_block *cmd,
Marc Zyngiereb781922016-12-20 14:47:05 +0000581 struct its_cmd_desc *desc)
582{
583 unsigned long vpt_addr;
Marc Zyngier5c9a8822017-07-28 21:20:37 +0100584 u64 target;
Marc Zyngiereb781922016-12-20 14:47:05 +0000585
586 vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
Marc Zyngier5c9a8822017-07-28 21:20:37 +0100587 target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;
Marc Zyngiereb781922016-12-20 14:47:05 +0000588
589 its_encode_cmd(cmd, GITS_CMD_VMAPP);
590 its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
591 its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
Marc Zyngier5c9a8822017-07-28 21:20:37 +0100592 its_encode_target(cmd, target);
Marc Zyngiereb781922016-12-20 14:47:05 +0000593 its_encode_vpt_addr(cmd, vpt_addr);
594 its_encode_vpt_size(cmd, LPI_NRBITS - 1);
595
596 its_fixup_cmd(cmd);
597
Marc Zyngier205e0652018-06-22 10:52:53 +0100598 return valid_vpe(its, desc->its_vmapp_cmd.vpe);
Marc Zyngiereb781922016-12-20 14:47:05 +0000599}
600
Marc Zyngier67047f902017-07-28 21:16:58 +0100601static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
602 struct its_cmd_block *cmd,
Marc Zyngierd011e4e2016-12-20 09:44:41 +0000603 struct its_cmd_desc *desc)
604{
605 u32 db;
606
607 if (desc->its_vmapti_cmd.db_enabled)
608 db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
609 else
610 db = 1023;
611
612 its_encode_cmd(cmd, GITS_CMD_VMAPTI);
613 its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
614 its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
615 its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
616 its_encode_db_phys_id(cmd, db);
617 its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);
618
619 its_fixup_cmd(cmd);
620
Marc Zyngier205e0652018-06-22 10:52:53 +0100621 return valid_vpe(its, desc->its_vmapti_cmd.vpe);
Marc Zyngierd011e4e2016-12-20 09:44:41 +0000622}
623
Marc Zyngier67047f902017-07-28 21:16:58 +0100624static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
625 struct its_cmd_block *cmd,
Marc Zyngierd011e4e2016-12-20 09:44:41 +0000626 struct its_cmd_desc *desc)
627{
628 u32 db;
629
630 if (desc->its_vmovi_cmd.db_enabled)
631 db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
632 else
633 db = 1023;
634
635 its_encode_cmd(cmd, GITS_CMD_VMOVI);
636 its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
637 its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
638 its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
639 its_encode_db_phys_id(cmd, db);
640 its_encode_db_valid(cmd, true);
641
642 its_fixup_cmd(cmd);
643
Marc Zyngier205e0652018-06-22 10:52:53 +0100644 return valid_vpe(its, desc->its_vmovi_cmd.vpe);
Marc Zyngierd011e4e2016-12-20 09:44:41 +0000645}
646
Marc Zyngier67047f902017-07-28 21:16:58 +0100647static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
648 struct its_cmd_block *cmd,
Marc Zyngier3171a472016-12-20 15:17:28 +0000649 struct its_cmd_desc *desc)
650{
Marc Zyngier5c9a8822017-07-28 21:20:37 +0100651 u64 target;
652
653 target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset;
Marc Zyngier3171a472016-12-20 15:17:28 +0000654 its_encode_cmd(cmd, GITS_CMD_VMOVP);
655 its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
656 its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
657 its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
Marc Zyngier5c9a8822017-07-28 21:20:37 +0100658 its_encode_target(cmd, target);
Marc Zyngier3171a472016-12-20 15:17:28 +0000659
660 its_fixup_cmd(cmd);
661
Marc Zyngier205e0652018-06-22 10:52:53 +0100662 return valid_vpe(its, desc->its_vmovp_cmd.vpe);
Marc Zyngier3171a472016-12-20 15:17:28 +0000663}
664
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000665static u64 its_cmd_ptr_to_offset(struct its_node *its,
666 struct its_cmd_block *ptr)
667{
668 return (ptr - its->cmd_base) * sizeof(*ptr);
669}
670
671static int its_queue_full(struct its_node *its)
672{
673 int widx;
674 int ridx;
675
676 widx = its->cmd_write - its->cmd_base;
677 ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);
678
679 /* This is incredibly unlikely to happen, unless the ITS locks up. */
680 if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
681 return 1;
682
683 return 0;
684}
685
686static struct its_cmd_block *its_allocate_entry(struct its_node *its)
687{
688 struct its_cmd_block *cmd;
689 u32 count = 1000000; /* 1s! */
690
691 while (its_queue_full(its)) {
692 count--;
693 if (!count) {
694 pr_err_ratelimited("ITS queue not draining\n");
695 return NULL;
696 }
697 cpu_relax();
698 udelay(1);
699 }
700
701 cmd = its->cmd_write++;
702
703 /* Handle queue wrapping */
704 if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
705 its->cmd_write = its->cmd_base;
706
Marc Zyngier34d677a2016-12-19 17:16:45 +0000707 /* Clear command */
708 cmd->raw_cmd[0] = 0;
709 cmd->raw_cmd[1] = 0;
710 cmd->raw_cmd[2] = 0;
711 cmd->raw_cmd[3] = 0;
712
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000713 return cmd;
714}
715
716static struct its_cmd_block *its_post_commands(struct its_node *its)
717{
718 u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);
719
720 writel_relaxed(wr, its->base + GITS_CWRITER);
721
722 return its->cmd_write;
723}
724
725static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
726{
727 /*
728 * Make sure the commands written to memory are observable by
729 * the ITS.
730 */
731 if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
Vladimir Murzin328191c2016-11-02 11:54:05 +0000732 gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000733 else
734 dsb(ishst);
735}
736
Marc Zyngiera19b4622017-08-04 17:45:50 +0100737static int its_wait_for_range_completion(struct its_node *its,
738 struct its_cmd_block *from,
739 struct its_cmd_block *to)
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000740{
741 u64 rd_idx, from_idx, to_idx;
742 u32 count = 1000000; /* 1s! */
743
744 from_idx = its_cmd_ptr_to_offset(its, from);
745 to_idx = its_cmd_ptr_to_offset(its, to);
746
747 while (1) {
748 rd_idx = readl_relaxed(its->base + GITS_CREADR);
Marc Zyngier9bdd8b12017-08-19 10:16:02 +0100749
750 /* Direct case */
751 if (from_idx < to_idx && rd_idx >= to_idx)
752 break;
753
754 /* Wrapped case */
755 if (from_idx >= to_idx && rd_idx >= to_idx && rd_idx < from_idx)
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000756 break;
757
758 count--;
759 if (!count) {
Marc Zyngiera19b4622017-08-04 17:45:50 +0100760 pr_err_ratelimited("ITS queue timeout (%llu %llu %llu)\n",
761 from_idx, to_idx, rd_idx);
762 return -1;
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000763 }
764 cpu_relax();
765 udelay(1);
766 }
Marc Zyngiera19b4622017-08-04 17:45:50 +0100767
768 return 0;
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000769}
770
Marc Zyngiere4f90942016-12-19 17:56:32 +0000771/* Warning, macro hell follows */
772#define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn) \
773void name(struct its_node *its, \
774 buildtype builder, \
775 struct its_cmd_desc *desc) \
776{ \
777 struct its_cmd_block *cmd, *sync_cmd, *next_cmd; \
778 synctype *sync_obj; \
779 unsigned long flags; \
780 \
781 raw_spin_lock_irqsave(&its->lock, flags); \
782 \
783 cmd = its_allocate_entry(its); \
784 if (!cmd) { /* We're soooooo screewed... */ \
785 raw_spin_unlock_irqrestore(&its->lock, flags); \
786 return; \
787 } \
Marc Zyngier67047f902017-07-28 21:16:58 +0100788 sync_obj = builder(its, cmd, desc); \
Marc Zyngiere4f90942016-12-19 17:56:32 +0000789 its_flush_cmd(its, cmd); \
790 \
791 if (sync_obj) { \
792 sync_cmd = its_allocate_entry(its); \
793 if (!sync_cmd) \
794 goto post; \
795 \
Marc Zyngier67047f902017-07-28 21:16:58 +0100796 buildfn(its, sync_cmd, sync_obj); \
Marc Zyngiere4f90942016-12-19 17:56:32 +0000797 its_flush_cmd(its, sync_cmd); \
798 } \
799 \
800post: \
801 next_cmd = its_post_commands(its); \
802 raw_spin_unlock_irqrestore(&its->lock, flags); \
803 \
Marc Zyngiera19b4622017-08-04 17:45:50 +0100804 if (its_wait_for_range_completion(its, cmd, next_cmd)) \
805 pr_err_ratelimited("ITS cmd %ps failed\n", builder); \
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000806}
807
Marc Zyngier67047f902017-07-28 21:16:58 +0100808static void its_build_sync_cmd(struct its_node *its,
809 struct its_cmd_block *sync_cmd,
Marc Zyngiere4f90942016-12-19 17:56:32 +0000810 struct its_collection *sync_col)
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000811{
Marc Zyngiere4f90942016-12-19 17:56:32 +0000812 its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
813 its_encode_target(sync_cmd, sync_col->target_address);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000814
Marc Zyngiere4f90942016-12-19 17:56:32 +0000815 its_fixup_cmd(sync_cmd);
816}
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000817
Marc Zyngiere4f90942016-12-19 17:56:32 +0000818static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
819 struct its_collection, its_build_sync_cmd)
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000820
Marc Zyngier67047f902017-07-28 21:16:58 +0100821static void its_build_vsync_cmd(struct its_node *its,
822 struct its_cmd_block *sync_cmd,
Marc Zyngierd011e4e2016-12-20 09:44:41 +0000823 struct its_vpe *sync_vpe)
824{
825 its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
826 its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000827
Marc Zyngierd011e4e2016-12-20 09:44:41 +0000828 its_fixup_cmd(sync_cmd);
829}
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000830
Marc Zyngierd011e4e2016-12-20 09:44:41 +0000831static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
832 struct its_vpe, its_build_vsync_cmd)
833
Marc Zyngier8d85dce2016-12-19 18:02:13 +0000834static void its_send_int(struct its_device *dev, u32 event_id)
835{
836 struct its_cmd_desc desc;
837
838 desc.its_int_cmd.dev = dev;
839 desc.its_int_cmd.event_id = event_id;
840
841 its_send_single_command(dev->its, its_build_int_cmd, &desc);
842}
843
844static void its_send_clear(struct its_device *dev, u32 event_id)
845{
846 struct its_cmd_desc desc;
847
848 desc.its_clear_cmd.dev = dev;
849 desc.its_clear_cmd.event_id = event_id;
850
851 its_send_single_command(dev->its, its_build_clear_cmd, &desc);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000852}
853
854static void its_send_inv(struct its_device *dev, u32 event_id)
855{
856 struct its_cmd_desc desc;
857
858 desc.its_inv_cmd.dev = dev;
859 desc.its_inv_cmd.event_id = event_id;
860
861 its_send_single_command(dev->its, its_build_inv_cmd, &desc);
862}
863
864static void its_send_mapd(struct its_device *dev, int valid)
865{
866 struct its_cmd_desc desc;
867
868 desc.its_mapd_cmd.dev = dev;
869 desc.its_mapd_cmd.valid = !!valid;
870
871 its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
872}
873
874static void its_send_mapc(struct its_node *its, struct its_collection *col,
875 int valid)
876{
877 struct its_cmd_desc desc;
878
879 desc.its_mapc_cmd.col = col;
880 desc.its_mapc_cmd.valid = !!valid;
881
882 its_send_single_command(its, its_build_mapc_cmd, &desc);
883}
884
Marc Zyngier6a25ad32016-12-20 15:52:26 +0000885static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id)
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000886{
887 struct its_cmd_desc desc;
888
Marc Zyngier6a25ad32016-12-20 15:52:26 +0000889 desc.its_mapti_cmd.dev = dev;
890 desc.its_mapti_cmd.phys_id = irq_id;
891 desc.its_mapti_cmd.event_id = id;
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000892
Marc Zyngier6a25ad32016-12-20 15:52:26 +0000893 its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000894}
895
896static void its_send_movi(struct its_device *dev,
897 struct its_collection *col, u32 id)
898{
899 struct its_cmd_desc desc;
900
901 desc.its_movi_cmd.dev = dev;
902 desc.its_movi_cmd.col = col;
Marc Zyngier591e5be2015-07-17 10:46:42 +0100903 desc.its_movi_cmd.event_id = id;
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000904
905 its_send_single_command(dev->its, its_build_movi_cmd, &desc);
906}
907
908static void its_send_discard(struct its_device *dev, u32 id)
909{
910 struct its_cmd_desc desc;
911
912 desc.its_discard_cmd.dev = dev;
913 desc.its_discard_cmd.event_id = id;
914
915 its_send_single_command(dev->its, its_build_discard_cmd, &desc);
916}
917
918static void its_send_invall(struct its_node *its, struct its_collection *col)
919{
920 struct its_cmd_desc desc;
921
922 desc.its_invall_cmd.col = col;
923
924 its_send_single_command(its, its_build_invall_cmd, &desc);
925}
Marc Zyngierc48ed512014-11-24 14:35:12 +0000926
Marc Zyngierd011e4e2016-12-20 09:44:41 +0000927static void its_send_vmapti(struct its_device *dev, u32 id)
928{
929 struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
930 struct its_cmd_desc desc;
931
932 desc.its_vmapti_cmd.vpe = map->vpe;
933 desc.its_vmapti_cmd.dev = dev;
934 desc.its_vmapti_cmd.virt_id = map->vintid;
935 desc.its_vmapti_cmd.event_id = id;
936 desc.its_vmapti_cmd.db_enabled = map->db_enabled;
937
938 its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
939}
940
941static void its_send_vmovi(struct its_device *dev, u32 id)
942{
943 struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
944 struct its_cmd_desc desc;
945
946 desc.its_vmovi_cmd.vpe = map->vpe;
947 desc.its_vmovi_cmd.dev = dev;
948 desc.its_vmovi_cmd.event_id = id;
949 desc.its_vmovi_cmd.db_enabled = map->db_enabled;
950
951 its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
952}
953
Marc Zyngier75fd9512017-10-08 18:46:39 +0100954static void its_send_vmapp(struct its_node *its,
955 struct its_vpe *vpe, bool valid)
Marc Zyngiereb781922016-12-20 14:47:05 +0000956{
957 struct its_cmd_desc desc;
Marc Zyngiereb781922016-12-20 14:47:05 +0000958
959 desc.its_vmapp_cmd.vpe = vpe;
960 desc.its_vmapp_cmd.valid = valid;
Marc Zyngier75fd9512017-10-08 18:46:39 +0100961 desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];
Marc Zyngiereb781922016-12-20 14:47:05 +0000962
Marc Zyngier75fd9512017-10-08 18:46:39 +0100963 its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
Marc Zyngiereb781922016-12-20 14:47:05 +0000964}
965
Marc Zyngier3171a472016-12-20 15:17:28 +0000966static void its_send_vmovp(struct its_vpe *vpe)
967{
968 struct its_cmd_desc desc;
969 struct its_node *its;
970 unsigned long flags;
971 int col_id = vpe->col_idx;
972
973 desc.its_vmovp_cmd.vpe = vpe;
974 desc.its_vmovp_cmd.its_list = (u16)its_list_map;
975
976 if (!its_list_map) {
977 its = list_first_entry(&its_nodes, struct its_node, entry);
978 desc.its_vmovp_cmd.seq_num = 0;
979 desc.its_vmovp_cmd.col = &its->collections[col_id];
980 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
981 return;
982 }
983
984 /*
985 * Yet another marvel of the architecture. If using the
986 * its_list "feature", we need to make sure that all ITSs
987 * receive all VMOVP commands in the same order. The only way
988 * to guarantee this is to make vmovp a serialization point.
989 *
990 * Wall <-- Head.
991 */
992 raw_spin_lock_irqsave(&vmovp_lock, flags);
993
994 desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
995
996 /* Emit VMOVPs */
997 list_for_each_entry(its, &its_nodes, entry) {
998 if (!its->is_v4)
999 continue;
1000
Marc Zyngier2247e1b2017-10-08 18:50:36 +01001001 if (!vpe->its_vm->vlpi_count[its->list_nr])
1002 continue;
1003
Marc Zyngier3171a472016-12-20 15:17:28 +00001004 desc.its_vmovp_cmd.col = &its->collections[col_id];
1005 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
1006 }
1007
1008 raw_spin_unlock_irqrestore(&vmovp_lock, flags);
1009}
1010
Marc Zyngier40619a22017-10-08 15:16:09 +01001011static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
Marc Zyngiereb781922016-12-20 14:47:05 +00001012{
1013 struct its_cmd_desc desc;
Marc Zyngiereb781922016-12-20 14:47:05 +00001014
1015 desc.its_vinvall_cmd.vpe = vpe;
Marc Zyngier40619a22017-10-08 15:16:09 +01001016 its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
Marc Zyngiereb781922016-12-20 14:47:05 +00001017}
1018
Marc Zyngierc48ed512014-11-24 14:35:12 +00001019/*
1020 * irqchip functions - assumes MSI, mostly.
1021 */
1022
1023static inline u32 its_get_event_id(struct irq_data *d)
1024{
1025 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
Marc Zyngier591e5be2015-07-17 10:46:42 +01001026 return d->hwirq - its_dev->event_map.lpi_base;
Marc Zyngierc48ed512014-11-24 14:35:12 +00001027}
1028
Marc Zyngier015ec032016-12-20 09:54:57 +00001029static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
Marc Zyngierc48ed512014-11-24 14:35:12 +00001030{
Marc Zyngier015ec032016-12-20 09:54:57 +00001031 irq_hw_number_t hwirq;
Marc Zyngiere1a2e202018-07-27 14:36:00 +01001032 void *va;
Marc Zyngieradcdb942016-12-19 19:18:13 +00001033 u8 *cfg;
Marc Zyngierc48ed512014-11-24 14:35:12 +00001034
Marc Zyngier015ec032016-12-20 09:54:57 +00001035 if (irqd_is_forwarded_to_vcpu(d)) {
1036 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1037 u32 event = its_get_event_id(d);
Marc Zyngierd4d7b4a2017-10-26 10:44:07 +01001038 struct its_vlpi_map *map;
Marc Zyngier015ec032016-12-20 09:54:57 +00001039
Marc Zyngiere1a2e202018-07-27 14:36:00 +01001040 va = page_address(its_dev->event_map.vm->vprop_page);
Marc Zyngierd4d7b4a2017-10-26 10:44:07 +01001041 map = &its_dev->event_map.vlpi_maps[event];
1042 hwirq = map->vintid;
1043
1044 /* Remember the updated property */
1045 map->properties &= ~clr;
1046 map->properties |= set | LPI_PROP_GROUP1;
Marc Zyngier015ec032016-12-20 09:54:57 +00001047 } else {
Marc Zyngiere1a2e202018-07-27 14:36:00 +01001048 va = gic_rdists->prop_table_va;
Marc Zyngier015ec032016-12-20 09:54:57 +00001049 hwirq = d->hwirq;
1050 }
Marc Zyngieradcdb942016-12-19 19:18:13 +00001051
Marc Zyngiere1a2e202018-07-27 14:36:00 +01001052 cfg = va + hwirq - 8192;
Marc Zyngieradcdb942016-12-19 19:18:13 +00001053 *cfg &= ~clr;
Marc Zyngier015ec032016-12-20 09:54:57 +00001054 *cfg |= set | LPI_PROP_GROUP1;
Marc Zyngierc48ed512014-11-24 14:35:12 +00001055
1056 /*
1057 * Make the above write visible to the redistributors.
1058 * And yes, we're flushing exactly: One. Single. Byte.
1059 * Humpf...
1060 */
1061 if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
Vladimir Murzin328191c2016-11-02 11:54:05 +00001062 gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
Marc Zyngierc48ed512014-11-24 14:35:12 +00001063 else
1064 dsb(ishst);
Marc Zyngier015ec032016-12-20 09:54:57 +00001065}
1066
1067static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
1068{
1069 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1070
1071 lpi_write_config(d, clr, set);
Marc Zyngieradcdb942016-12-19 19:18:13 +00001072 its_send_inv(its_dev, its_get_event_id(d));
Marc Zyngierc48ed512014-11-24 14:35:12 +00001073}
1074
Marc Zyngier015ec032016-12-20 09:54:57 +00001075static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
1076{
1077 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1078 u32 event = its_get_event_id(d);
1079
1080 if (its_dev->event_map.vlpi_maps[event].db_enabled == enable)
1081 return;
1082
1083 its_dev->event_map.vlpi_maps[event].db_enabled = enable;
1084
1085 /*
1086 * More fun with the architecture:
1087 *
1088 * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
1089 * value or to 1023, depending on the enable bit. But that
1090 * would be issueing a mapping for an /existing/ DevID+EventID
1091 * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
1092 * to the /same/ vPE, using this opportunity to adjust the
1093 * doorbell. Mouahahahaha. We loves it, Precious.
1094 */
1095 its_send_vmovi(its_dev, event);
Marc Zyngierc48ed512014-11-24 14:35:12 +00001096}
1097
1098static void its_mask_irq(struct irq_data *d)
1099{
Marc Zyngier015ec032016-12-20 09:54:57 +00001100 if (irqd_is_forwarded_to_vcpu(d))
1101 its_vlpi_set_doorbell(d, false);
1102
Marc Zyngieradcdb942016-12-19 19:18:13 +00001103 lpi_update_config(d, LPI_PROP_ENABLED, 0);
Marc Zyngierc48ed512014-11-24 14:35:12 +00001104}
1105
1106static void its_unmask_irq(struct irq_data *d)
1107{
Marc Zyngier015ec032016-12-20 09:54:57 +00001108 if (irqd_is_forwarded_to_vcpu(d))
1109 its_vlpi_set_doorbell(d, true);
1110
Marc Zyngieradcdb942016-12-19 19:18:13 +00001111 lpi_update_config(d, 0, LPI_PROP_ENABLED);
Marc Zyngierc48ed512014-11-24 14:35:12 +00001112}
1113
Marc Zyngierc48ed512014-11-24 14:35:12 +00001114static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
1115 bool force)
1116{
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +02001117 unsigned int cpu;
1118 const struct cpumask *cpu_mask = cpu_online_mask;
Marc Zyngierc48ed512014-11-24 14:35:12 +00001119 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1120 struct its_collection *target_col;
1121 u32 id = its_get_event_id(d);
1122
Marc Zyngier015ec032016-12-20 09:54:57 +00001123 /* A forwarded interrupt should use irq_set_vcpu_affinity */
1124 if (irqd_is_forwarded_to_vcpu(d))
1125 return -EINVAL;
1126
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +02001127 /* lpi cannot be routed to a redistributor that is on a foreign node */
1128 if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
1129 if (its_dev->its->numa_node >= 0) {
1130 cpu_mask = cpumask_of_node(its_dev->its->numa_node);
1131 if (!cpumask_intersects(mask_val, cpu_mask))
1132 return -EINVAL;
1133 }
1134 }
1135
1136 cpu = cpumask_any_and(mask_val, cpu_mask);
1137
Marc Zyngierc48ed512014-11-24 14:35:12 +00001138 if (cpu >= nr_cpu_ids)
1139 return -EINVAL;
1140
MaJun8b8d94a2017-05-18 16:19:13 +08001141 /* don't set the affinity when the target cpu is same as current one */
1142 if (cpu != its_dev->event_map.col_map[id]) {
1143 target_col = &its_dev->its->collections[cpu];
1144 its_send_movi(its_dev, target_col, id);
1145 its_dev->event_map.col_map[id] = cpu;
Marc Zyngier0d224d32017-08-18 09:39:18 +01001146 irq_data_update_effective_affinity(d, cpumask_of(cpu));
MaJun8b8d94a2017-05-18 16:19:13 +08001147 }
Marc Zyngierc48ed512014-11-24 14:35:12 +00001148
1149 return IRQ_SET_MASK_OK_DONE;
1150}
1151
Ard Biesheuvel558b0162017-10-17 17:55:56 +01001152static u64 its_irq_get_msi_base(struct its_device *its_dev)
1153{
1154 struct its_node *its = its_dev->its;
1155
1156 return its->phys_base + GITS_TRANSLATER;
1157}
1158
Marc Zyngierb48ac832014-11-24 14:35:16 +00001159static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
1160{
1161 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1162 struct its_node *its;
1163 u64 addr;
1164
1165 its = its_dev->its;
Ard Biesheuvel558b0162017-10-17 17:55:56 +01001166 addr = its->get_msi_base(its_dev);
Marc Zyngierb48ac832014-11-24 14:35:16 +00001167
Vladimir Murzinb11283e2016-11-02 11:54:03 +00001168 msg->address_lo = lower_32_bits(addr);
1169 msg->address_hi = upper_32_bits(addr);
Marc Zyngierb48ac832014-11-24 14:35:16 +00001170 msg->data = its_get_event_id(d);
Robin Murphy44bb7e22016-09-12 17:13:59 +01001171
1172 iommu_dma_map_msi_msg(d->irq, msg);
Marc Zyngierb48ac832014-11-24 14:35:16 +00001173}
1174
Marc Zyngier8d85dce2016-12-19 18:02:13 +00001175static int its_irq_set_irqchip_state(struct irq_data *d,
1176 enum irqchip_irq_state which,
1177 bool state)
1178{
1179 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1180 u32 event = its_get_event_id(d);
1181
1182 if (which != IRQCHIP_STATE_PENDING)
1183 return -EINVAL;
1184
1185 if (state)
1186 its_send_int(its_dev, event);
1187 else
1188 its_send_clear(its_dev, event);
1189
1190 return 0;
1191}
1192
Marc Zyngier2247e1b2017-10-08 18:50:36 +01001193static void its_map_vm(struct its_node *its, struct its_vm *vm)
1194{
1195 unsigned long flags;
1196
1197 /* Not using the ITS list? Everything is always mapped. */
1198 if (!its_list_map)
1199 return;
1200
1201 raw_spin_lock_irqsave(&vmovp_lock, flags);
1202
1203 /*
1204 * If the VM wasn't mapped yet, iterate over the vpes and get
1205 * them mapped now.
1206 */
1207 vm->vlpi_count[its->list_nr]++;
1208
1209 if (vm->vlpi_count[its->list_nr] == 1) {
1210 int i;
1211
1212 for (i = 0; i < vm->nr_vpes; i++) {
1213 struct its_vpe *vpe = vm->vpes[i];
Marc Zyngier44c4c252017-10-19 10:11:34 +01001214 struct irq_data *d = irq_get_irq_data(vpe->irq);
Marc Zyngier2247e1b2017-10-08 18:50:36 +01001215
1216 /* Map the VPE to the first possible CPU */
1217 vpe->col_idx = cpumask_first(cpu_online_mask);
1218 its_send_vmapp(its, vpe, true);
1219 its_send_vinvall(its, vpe);
Marc Zyngier44c4c252017-10-19 10:11:34 +01001220 irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
Marc Zyngier2247e1b2017-10-08 18:50:36 +01001221 }
1222 }
1223
1224 raw_spin_unlock_irqrestore(&vmovp_lock, flags);
1225}
1226
1227static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
1228{
1229 unsigned long flags;
1230
1231 /* Not using the ITS list? Everything is always mapped. */
1232 if (!its_list_map)
1233 return;
1234
1235 raw_spin_lock_irqsave(&vmovp_lock, flags);
1236
1237 if (!--vm->vlpi_count[its->list_nr]) {
1238 int i;
1239
1240 for (i = 0; i < vm->nr_vpes; i++)
1241 its_send_vmapp(its, vm->vpes[i], false);
1242 }
1243
1244 raw_spin_unlock_irqrestore(&vmovp_lock, flags);
1245}
1246
Marc Zyngierd011e4e2016-12-20 09:44:41 +00001247static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
1248{
1249 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1250 u32 event = its_get_event_id(d);
1251 int ret = 0;
1252
1253 if (!info->map)
1254 return -EINVAL;
1255
1256 mutex_lock(&its_dev->event_map.vlpi_lock);
1257
1258 if (!its_dev->event_map.vm) {
1259 struct its_vlpi_map *maps;
1260
Kees Cook6396bb22018-06-12 14:03:40 -07001261 maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps),
Marc Zyngierd011e4e2016-12-20 09:44:41 +00001262 GFP_KERNEL);
1263 if (!maps) {
1264 ret = -ENOMEM;
1265 goto out;
1266 }
1267
1268 its_dev->event_map.vm = info->map->vm;
1269 its_dev->event_map.vlpi_maps = maps;
1270 } else if (its_dev->event_map.vm != info->map->vm) {
1271 ret = -EINVAL;
1272 goto out;
1273 }
1274
1275 /* Get our private copy of the mapping information */
1276 its_dev->event_map.vlpi_maps[event] = *info->map;
1277
1278 if (irqd_is_forwarded_to_vcpu(d)) {
1279 /* Already mapped, move it around */
1280 its_send_vmovi(its_dev, event);
1281 } else {
Marc Zyngier2247e1b2017-10-08 18:50:36 +01001282 /* Ensure all the VPEs are mapped on this ITS */
1283 its_map_vm(its_dev->its, info->map->vm);
1284
Marc Zyngierd4d7b4a2017-10-26 10:44:07 +01001285 /*
1286 * Flag the interrupt as forwarded so that we can
1287 * start poking the virtual property table.
1288 */
1289 irqd_set_forwarded_to_vcpu(d);
1290
1291 /* Write out the property to the prop table */
1292 lpi_write_config(d, 0xff, info->map->properties);
1293
Marc Zyngierd011e4e2016-12-20 09:44:41 +00001294 /* Drop the physical mapping */
1295 its_send_discard(its_dev, event);
1296
1297 /* and install the virtual one */
1298 its_send_vmapti(its_dev, event);
Marc Zyngierd011e4e2016-12-20 09:44:41 +00001299
1300 /* Increment the number of VLPIs */
1301 its_dev->event_map.nr_vlpis++;
1302 }
1303
1304out:
1305 mutex_unlock(&its_dev->event_map.vlpi_lock);
1306 return ret;
1307}
1308
1309static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
1310{
1311 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1312 u32 event = its_get_event_id(d);
1313 int ret = 0;
1314
1315 mutex_lock(&its_dev->event_map.vlpi_lock);
1316
1317 if (!its_dev->event_map.vm ||
1318 !its_dev->event_map.vlpi_maps[event].vm) {
1319 ret = -EINVAL;
1320 goto out;
1321 }
1322
1323 /* Copy our mapping information to the incoming request */
1324 *info->map = its_dev->event_map.vlpi_maps[event];
1325
1326out:
1327 mutex_unlock(&its_dev->event_map.vlpi_lock);
1328 return ret;
1329}
1330
1331static int its_vlpi_unmap(struct irq_data *d)
1332{
1333 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1334 u32 event = its_get_event_id(d);
1335 int ret = 0;
1336
1337 mutex_lock(&its_dev->event_map.vlpi_lock);
1338
1339 if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
1340 ret = -EINVAL;
1341 goto out;
1342 }
1343
1344 /* Drop the virtual mapping */
1345 its_send_discard(its_dev, event);
1346
1347 /* and restore the physical one */
1348 irqd_clr_forwarded_to_vcpu(d);
1349 its_send_mapti(its_dev, d->hwirq, event);
1350 lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO |
1351 LPI_PROP_ENABLED |
1352 LPI_PROP_GROUP1));
1353
Marc Zyngier2247e1b2017-10-08 18:50:36 +01001354 /* Potentially unmap the VM from this ITS */
1355 its_unmap_vm(its_dev->its, its_dev->event_map.vm);
1356
Marc Zyngierd011e4e2016-12-20 09:44:41 +00001357 /*
1358 * Drop the refcount and make the device available again if
1359 * this was the last VLPI.
1360 */
1361 if (!--its_dev->event_map.nr_vlpis) {
1362 its_dev->event_map.vm = NULL;
1363 kfree(its_dev->event_map.vlpi_maps);
1364 }
1365
1366out:
1367 mutex_unlock(&its_dev->event_map.vlpi_lock);
1368 return ret;
1369}
1370
Marc Zyngier015ec032016-12-20 09:54:57 +00001371static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
1372{
1373 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1374
1375 if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
1376 return -EINVAL;
1377
1378 if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI)
1379 lpi_update_config(d, 0xff, info->config);
1380 else
1381 lpi_write_config(d, 0xff, info->config);
1382 its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));
1383
1384 return 0;
1385}
1386
Marc Zyngierc808eea2016-12-20 09:31:20 +00001387static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
1388{
1389 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1390 struct its_cmd_info *info = vcpu_info;
1391
1392 /* Need a v4 ITS */
Marc Zyngierd011e4e2016-12-20 09:44:41 +00001393 if (!its_dev->its->is_v4)
Marc Zyngierc808eea2016-12-20 09:31:20 +00001394 return -EINVAL;
1395
Marc Zyngierd011e4e2016-12-20 09:44:41 +00001396 /* Unmap request? */
1397 if (!info)
1398 return its_vlpi_unmap(d);
1399
Marc Zyngierc808eea2016-12-20 09:31:20 +00001400 switch (info->cmd_type) {
1401 case MAP_VLPI:
Marc Zyngierd011e4e2016-12-20 09:44:41 +00001402 return its_vlpi_map(d, info);
Marc Zyngierc808eea2016-12-20 09:31:20 +00001403
1404 case GET_VLPI:
Marc Zyngierd011e4e2016-12-20 09:44:41 +00001405 return its_vlpi_get(d, info);
Marc Zyngierc808eea2016-12-20 09:31:20 +00001406
1407 case PROP_UPDATE_VLPI:
1408 case PROP_UPDATE_AND_INV_VLPI:
Marc Zyngier015ec032016-12-20 09:54:57 +00001409 return its_vlpi_prop_update(d, info);
Marc Zyngierc808eea2016-12-20 09:31:20 +00001410
1411 default:
1412 return -EINVAL;
1413 }
1414}
1415
Marc Zyngierc48ed512014-11-24 14:35:12 +00001416static struct irq_chip its_irq_chip = {
1417 .name = "ITS",
1418 .irq_mask = its_mask_irq,
1419 .irq_unmask = its_unmask_irq,
Ashok Kumar004fa082016-02-11 05:38:53 -08001420 .irq_eoi = irq_chip_eoi_parent,
Marc Zyngierc48ed512014-11-24 14:35:12 +00001421 .irq_set_affinity = its_set_affinity,
Marc Zyngierb48ac832014-11-24 14:35:16 +00001422 .irq_compose_msi_msg = its_irq_compose_msi_msg,
Marc Zyngier8d85dce2016-12-19 18:02:13 +00001423 .irq_set_irqchip_state = its_irq_set_irqchip_state,
Marc Zyngierc808eea2016-12-20 09:31:20 +00001424 .irq_set_vcpu_affinity = its_irq_set_vcpu_affinity,
Marc Zyngierb48ac832014-11-24 14:35:16 +00001425};
1426
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001427
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001428/*
1429 * How we allocate LPIs:
1430 *
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001431 * lpi_range_list contains ranges of LPIs that are to available to
1432 * allocate from. To allocate LPIs, just pick the first range that
1433 * fits the required allocation, and reduce it by the required
1434 * amount. Once empty, remove the range from the list.
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001435 *
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001436 * To free a range of LPIs, add a free range to the list, sort it and
1437 * merge the result if the new range happens to be adjacent to an
1438 * already free block.
1439 *
1440 * The consequence of the above is that allocation is cost is low, but
1441 * freeing is expensive. We assumes that freeing rarely occurs.
1442 */
Jia He4cb205c2018-08-28 12:53:26 +08001443#define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001444
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001445static DEFINE_MUTEX(lpi_range_lock);
1446static LIST_HEAD(lpi_range_list);
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001447
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001448struct lpi_range {
1449 struct list_head entry;
1450 u32 base_id;
1451 u32 span;
1452};
1453
1454static struct lpi_range *mk_lpi_range(u32 base, u32 span)
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001455{
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001456 struct lpi_range *range;
1457
1458 range = kzalloc(sizeof(*range), GFP_KERNEL);
1459 if (range) {
1460 INIT_LIST_HEAD(&range->entry);
1461 range->base_id = base;
1462 range->span = span;
1463 }
1464
1465 return range;
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001466}
1467
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001468static int lpi_range_cmp(void *priv, struct list_head *a, struct list_head *b)
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001469{
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001470 struct lpi_range *ra, *rb;
1471
1472 ra = container_of(a, struct lpi_range, entry);
1473 rb = container_of(b, struct lpi_range, entry);
1474
1475 return rb->base_id - ra->base_id;
1476}
1477
1478static void merge_lpi_ranges(void)
1479{
1480 struct lpi_range *range, *tmp;
1481
1482 list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
1483 if (!list_is_last(&range->entry, &lpi_range_list) &&
1484 (tmp->base_id == (range->base_id + range->span))) {
1485 tmp->base_id = range->base_id;
1486 tmp->span += range->span;
1487 list_del(&range->entry);
1488 kfree(range);
1489 }
1490 }
1491}
1492
1493static int alloc_lpi_range(u32 nr_lpis, u32 *base)
1494{
1495 struct lpi_range *range, *tmp;
1496 int err = -ENOSPC;
1497
1498 mutex_lock(&lpi_range_lock);
1499
1500 list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
1501 if (range->span >= nr_lpis) {
1502 *base = range->base_id;
1503 range->base_id += nr_lpis;
1504 range->span -= nr_lpis;
1505
1506 if (range->span == 0) {
1507 list_del(&range->entry);
1508 kfree(range);
1509 }
1510
1511 err = 0;
1512 break;
1513 }
1514 }
1515
1516 mutex_unlock(&lpi_range_lock);
1517
1518 pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis);
1519 return err;
1520}
1521
1522static int free_lpi_range(u32 base, u32 nr_lpis)
1523{
1524 struct lpi_range *new;
1525 int err = 0;
1526
1527 mutex_lock(&lpi_range_lock);
1528
1529 new = mk_lpi_range(base, nr_lpis);
1530 if (!new) {
1531 err = -ENOMEM;
1532 goto out;
1533 }
1534
1535 list_add(&new->entry, &lpi_range_list);
1536 list_sort(NULL, &lpi_range_list, lpi_range_cmp);
1537 merge_lpi_ranges();
1538out:
1539 mutex_unlock(&lpi_range_lock);
1540 return err;
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001541}
1542
Tomasz Nowicki04a0e4d2016-01-19 14:11:18 +01001543static int __init its_lpi_init(u32 id_bits)
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001544{
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001545 u32 lpis = (1UL << id_bits) - 8192;
Marc Zyngier12b29052018-05-31 09:01:59 +01001546 u32 numlpis;
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001547 int err;
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001548
Marc Zyngier12b29052018-05-31 09:01:59 +01001549 numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer);
1550
1551 if (numlpis > 2 && !WARN_ON(numlpis > lpis)) {
1552 lpis = numlpis;
1553 pr_info("ITS: Using hypervisor restricted LPI range [%u]\n",
1554 lpis);
1555 }
1556
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001557 /*
1558 * Initializing the allocator is just the same as freeing the
1559 * full range of LPIs.
1560 */
1561 err = free_lpi_range(8192, lpis);
1562 pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis);
1563 return err;
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001564}
1565
Marc Zyngier38dd7c42018-05-27 17:03:03 +01001566static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids)
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001567{
1568 unsigned long *bitmap = NULL;
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001569 int err = 0;
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001570
1571 do {
Marc Zyngier38dd7c42018-05-27 17:03:03 +01001572 err = alloc_lpi_range(nr_irqs, base);
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001573 if (!err)
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001574 break;
1575
Marc Zyngier38dd7c42018-05-27 17:03:03 +01001576 nr_irqs /= 2;
1577 } while (nr_irqs > 0);
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001578
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001579 if (err)
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001580 goto out;
1581
Marc Zyngier38dd7c42018-05-27 17:03:03 +01001582 bitmap = kcalloc(BITS_TO_LONGS(nr_irqs), sizeof (long), GFP_ATOMIC);
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001583 if (!bitmap)
1584 goto out;
1585
Marc Zyngier38dd7c42018-05-27 17:03:03 +01001586 *nr_ids = nr_irqs;
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001587
1588out:
Marc Zyngierc8415b92015-10-02 16:44:05 +01001589 if (!bitmap)
1590 *base = *nr_ids = 0;
1591
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001592 return bitmap;
1593}
1594
Marc Zyngier38dd7c42018-05-27 17:03:03 +01001595static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids)
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001596{
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001597 WARN_ON(free_lpi_range(base, nr_ids));
Marc Zyngiercf2be8b2016-12-19 18:49:59 +00001598 kfree(bitmap);
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001599}
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001600
Marc Zyngier053be482018-07-27 15:02:27 +01001601static void gic_reset_prop_table(void *va)
1602{
1603 /* Priority 0xa0, Group-1, disabled */
1604 memset(va, LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, LPI_PROPBASE_SZ);
1605
1606 /* Make sure the GIC will observe the written configuration */
1607 gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ);
1608}
1609
Marc Zyngier0e5ccf92016-12-19 18:15:05 +00001610static struct page *its_allocate_prop_table(gfp_t gfp_flags)
1611{
1612 struct page *prop_page;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001613
Marc Zyngier0e5ccf92016-12-19 18:15:05 +00001614 prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
1615 if (!prop_page)
1616 return NULL;
1617
Marc Zyngier053be482018-07-27 15:02:27 +01001618 gic_reset_prop_table(page_address(prop_page));
Marc Zyngier0e5ccf92016-12-19 18:15:05 +00001619
1620 return prop_page;
1621}
1622
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00001623static void its_free_prop_table(struct page *prop_page)
1624{
1625 free_pages((unsigned long)page_address(prop_page),
1626 get_order(LPI_PROPBASE_SZ));
1627}
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001628
Marc Zyngier11e37d32018-07-27 13:38:54 +01001629static int __init its_setup_lpi_prop_table(void)
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001630{
Marc Zyngiere1a2e202018-07-27 14:36:00 +01001631 struct page *page;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001632
Jia He4cb205c2018-08-28 12:53:26 +08001633 lpi_id_bits = min_t(u32, GICD_TYPER_ID_BITS(gic_rdists->gicd_typer),
1634 ITS_MAX_LPI_NRBITS);
Marc Zyngiere1a2e202018-07-27 14:36:00 +01001635 page = its_allocate_prop_table(GFP_NOWAIT);
1636 if (!page) {
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001637 pr_err("Failed to allocate PROPBASE\n");
1638 return -ENOMEM;
1639 }
1640
Marc Zyngiere1a2e202018-07-27 14:36:00 +01001641 gic_rdists->prop_table_pa = page_to_phys(page);
1642 gic_rdists->prop_table_va = page_address(page);
1643
1644 pr_info("GICv3: using LPI property table @%pa\n",
1645 &gic_rdists->prop_table_pa);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001646
Shanker Donthineni6c31e122017-06-22 18:19:14 -05001647 return its_lpi_init(lpi_id_bits);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001648}
1649
1650static const char *its_base_type_string[] = {
1651 [GITS_BASER_TYPE_DEVICE] = "Devices",
1652 [GITS_BASER_TYPE_VCPU] = "Virtual CPUs",
Marc Zyngier4f46de92016-12-20 15:50:14 +00001653 [GITS_BASER_TYPE_RESERVED3] = "Reserved (3)",
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001654 [GITS_BASER_TYPE_COLLECTION] = "Interrupt Collections",
1655 [GITS_BASER_TYPE_RESERVED5] = "Reserved (5)",
1656 [GITS_BASER_TYPE_RESERVED6] = "Reserved (6)",
1657 [GITS_BASER_TYPE_RESERVED7] = "Reserved (7)",
1658};
1659
Shanker Donthineni2d81d422016-06-06 18:17:28 -05001660static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
1661{
1662 u32 idx = baser - its->tables;
1663
Vladimir Murzin0968a612016-11-02 11:54:06 +00001664 return gits_read_baser(its->base + GITS_BASER + (idx << 3));
Shanker Donthineni2d81d422016-06-06 18:17:28 -05001665}
1666
1667static void its_write_baser(struct its_node *its, struct its_baser *baser,
1668 u64 val)
1669{
1670 u32 idx = baser - its->tables;
1671
Vladimir Murzin0968a612016-11-02 11:54:06 +00001672 gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
Shanker Donthineni2d81d422016-06-06 18:17:28 -05001673 baser->val = its_read_baser(its, baser);
1674}
1675
Shanker Donthineni93473592016-06-06 18:17:30 -05001676static int its_setup_baser(struct its_node *its, struct its_baser *baser,
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001677 u64 cache, u64 shr, u32 psz, u32 order,
1678 bool indirect)
Shanker Donthineni93473592016-06-06 18:17:30 -05001679{
1680 u64 val = its_read_baser(its, baser);
1681 u64 esz = GITS_BASER_ENTRY_SIZE(val);
1682 u64 type = GITS_BASER_TYPE(val);
Shanker Donthineni30ae9612017-10-09 11:46:55 -05001683 u64 baser_phys, tmp;
Shanker Donthineni93473592016-06-06 18:17:30 -05001684 u32 alloc_pages;
1685 void *base;
Shanker Donthineni93473592016-06-06 18:17:30 -05001686
1687retry_alloc_baser:
1688 alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
1689 if (alloc_pages > GITS_BASER_PAGES_MAX) {
1690 pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
1691 &its->phys_base, its_base_type_string[type],
1692 alloc_pages, GITS_BASER_PAGES_MAX);
1693 alloc_pages = GITS_BASER_PAGES_MAX;
1694 order = get_order(GITS_BASER_PAGES_MAX * psz);
1695 }
1696
1697 base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
1698 if (!base)
1699 return -ENOMEM;
1700
Shanker Donthineni30ae9612017-10-09 11:46:55 -05001701 baser_phys = virt_to_phys(base);
1702
1703 /* Check if the physical address of the memory is above 48bits */
1704 if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) {
1705
1706 /* 52bit PA is supported only when PageSize=64K */
1707 if (psz != SZ_64K) {
1708 pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
1709 free_pages((unsigned long)base, order);
1710 return -ENXIO;
1711 }
1712
1713 /* Convert 52bit PA to 48bit field */
1714 baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys);
1715 }
1716
Shanker Donthineni93473592016-06-06 18:17:30 -05001717retry_baser:
Shanker Donthineni30ae9612017-10-09 11:46:55 -05001718 val = (baser_phys |
Shanker Donthineni93473592016-06-06 18:17:30 -05001719 (type << GITS_BASER_TYPE_SHIFT) |
1720 ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
1721 ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) |
1722 cache |
1723 shr |
1724 GITS_BASER_VALID);
1725
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001726 val |= indirect ? GITS_BASER_INDIRECT : 0x0;
1727
Shanker Donthineni93473592016-06-06 18:17:30 -05001728 switch (psz) {
1729 case SZ_4K:
1730 val |= GITS_BASER_PAGE_SIZE_4K;
1731 break;
1732 case SZ_16K:
1733 val |= GITS_BASER_PAGE_SIZE_16K;
1734 break;
1735 case SZ_64K:
1736 val |= GITS_BASER_PAGE_SIZE_64K;
1737 break;
1738 }
1739
1740 its_write_baser(its, baser, val);
1741 tmp = baser->val;
1742
1743 if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
1744 /*
1745 * Shareability didn't stick. Just use
1746 * whatever the read reported, which is likely
1747 * to be the only thing this redistributor
1748 * supports. If that's zero, make it
1749 * non-cacheable as well.
1750 */
1751 shr = tmp & GITS_BASER_SHAREABILITY_MASK;
1752 if (!shr) {
1753 cache = GITS_BASER_nC;
Vladimir Murzin328191c2016-11-02 11:54:05 +00001754 gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
Shanker Donthineni93473592016-06-06 18:17:30 -05001755 }
1756 goto retry_baser;
1757 }
1758
1759 if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
1760 /*
1761 * Page size didn't stick. Let's try a smaller
1762 * size and retry. If we reach 4K, then
1763 * something is horribly wrong...
1764 */
1765 free_pages((unsigned long)base, order);
1766 baser->base = NULL;
1767
1768 switch (psz) {
1769 case SZ_16K:
1770 psz = SZ_4K;
1771 goto retry_alloc_baser;
1772 case SZ_64K:
1773 psz = SZ_16K;
1774 goto retry_alloc_baser;
1775 }
1776 }
1777
1778 if (val != tmp) {
Vladimir Murzinb11283e2016-11-02 11:54:03 +00001779 pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
Shanker Donthineni93473592016-06-06 18:17:30 -05001780 &its->phys_base, its_base_type_string[type],
Vladimir Murzinb11283e2016-11-02 11:54:03 +00001781 val, tmp);
Shanker Donthineni93473592016-06-06 18:17:30 -05001782 free_pages((unsigned long)base, order);
1783 return -ENXIO;
1784 }
1785
1786 baser->order = order;
1787 baser->base = base;
1788 baser->psz = psz;
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001789 tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;
Shanker Donthineni93473592016-06-06 18:17:30 -05001790
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001791 pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
Vladimir Murzind524eaa2016-11-02 11:54:04 +00001792 &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
Shanker Donthineni93473592016-06-06 18:17:30 -05001793 its_base_type_string[type],
1794 (unsigned long)virt_to_phys(base),
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001795 indirect ? "indirect" : "flat", (int)esz,
Shanker Donthineni93473592016-06-06 18:17:30 -05001796 psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
1797
1798 return 0;
1799}
1800
Marc Zyngier4cacac52016-12-19 18:18:34 +00001801static bool its_parse_indirect_baser(struct its_node *its,
1802 struct its_baser *baser,
Shanker Donthineni32bd44d2017-10-07 15:43:48 -05001803 u32 psz, u32 *order, u32 ids)
Shanker Donthineni4b75c452016-06-06 18:17:29 -05001804{
Marc Zyngier4cacac52016-12-19 18:18:34 +00001805 u64 tmp = its_read_baser(its, baser);
1806 u64 type = GITS_BASER_TYPE(tmp);
1807 u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
Shanker Donthineni2fd632a2017-01-25 21:51:41 -06001808 u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
Shanker Donthineni4b75c452016-06-06 18:17:29 -05001809 u32 new_order = *order;
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001810 bool indirect = false;
1811
1812 /* No need to enable Indirection if memory requirement < (psz*2)bytes */
1813 if ((esz << ids) > (psz * 2)) {
1814 /*
1815 * Find out whether hw supports a single or two-level table by
1816 * table by reading bit at offset '62' after writing '1' to it.
1817 */
1818 its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
1819 indirect = !!(baser->val & GITS_BASER_INDIRECT);
1820
1821 if (indirect) {
1822 /*
1823 * The size of the lvl2 table is equal to ITS page size
1824 * which is 'psz'. For computing lvl1 table size,
1825 * subtract ID bits that sparse lvl2 table from 'ids'
1826 * which is reported by ITS hardware times lvl1 table
1827 * entry size.
1828 */
Vladimir Murzind524eaa2016-11-02 11:54:04 +00001829 ids -= ilog2(psz / (int)esz);
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001830 esz = GITS_LVL1_ENTRY_SIZE;
1831 }
1832 }
Shanker Donthineni4b75c452016-06-06 18:17:29 -05001833
1834 /*
1835 * Allocate as many entries as required to fit the
1836 * range of device IDs that the ITS can grok... The ID
1837 * space being incredibly sparse, this results in a
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001838 * massive waste of memory if two-level device table
1839 * feature is not supported by hardware.
Shanker Donthineni4b75c452016-06-06 18:17:29 -05001840 */
1841 new_order = max_t(u32, get_order(esz << ids), new_order);
1842 if (new_order >= MAX_ORDER) {
1843 new_order = MAX_ORDER - 1;
Vladimir Murzind524eaa2016-11-02 11:54:04 +00001844 ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
Marc Zyngier4cacac52016-12-19 18:18:34 +00001845 pr_warn("ITS@%pa: %s Table too large, reduce ids %u->%u\n",
1846 &its->phys_base, its_base_type_string[type],
1847 its->device_ids, ids);
Shanker Donthineni4b75c452016-06-06 18:17:29 -05001848 }
1849
1850 *order = new_order;
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001851
1852 return indirect;
Shanker Donthineni4b75c452016-06-06 18:17:29 -05001853}
1854
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001855static void its_free_tables(struct its_node *its)
1856{
1857 int i;
1858
1859 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
Shanker Donthineni1a485f42016-02-01 20:19:44 -06001860 if (its->tables[i].base) {
1861 free_pages((unsigned long)its->tables[i].base,
1862 its->tables[i].order);
1863 its->tables[i].base = NULL;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001864 }
1865 }
1866}
1867
Shanker Donthineni0e0b0f62016-06-06 18:17:31 -05001868static int its_alloc_tables(struct its_node *its)
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001869{
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001870 u64 shr = GITS_BASER_InnerShareable;
Shanker Donthineni2fd632a2017-01-25 21:51:41 -06001871 u64 cache = GITS_BASER_RaWaWb;
Shanker Donthineni93473592016-06-06 18:17:30 -05001872 u32 psz = SZ_64K;
1873 int err, i;
Robert Richter94100972015-09-21 22:58:38 +02001874
Ard Biesheuvelfa150012017-10-17 17:55:54 +01001875 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)
1876 /* erratum 24313: ignore memory access type */
1877 cache = GITS_BASER_nCnB;
Shanker Donthineni466b7d12016-03-09 22:10:49 -06001878
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001879 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
Shanker Donthineni2d81d422016-06-06 18:17:28 -05001880 struct its_baser *baser = its->tables + i;
1881 u64 val = its_read_baser(its, baser);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001882 u64 type = GITS_BASER_TYPE(val);
Shanker Donthineni93473592016-06-06 18:17:30 -05001883 u32 order = get_order(psz);
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001884 bool indirect = false;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001885
Marc Zyngier4cacac52016-12-19 18:18:34 +00001886 switch (type) {
1887 case GITS_BASER_TYPE_NONE:
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001888 continue;
1889
Marc Zyngier4cacac52016-12-19 18:18:34 +00001890 case GITS_BASER_TYPE_DEVICE:
Shanker Donthineni32bd44d2017-10-07 15:43:48 -05001891 indirect = its_parse_indirect_baser(its, baser,
1892 psz, &order,
1893 its->device_ids);
Marc Zyngier4cacac52016-12-19 18:18:34 +00001894 case GITS_BASER_TYPE_VCPU:
1895 indirect = its_parse_indirect_baser(its, baser,
Shanker Donthineni32bd44d2017-10-07 15:43:48 -05001896 psz, &order,
1897 ITS_MAX_VPEID_BITS);
Marc Zyngier4cacac52016-12-19 18:18:34 +00001898 break;
1899 }
Marc Zyngierf54b97e2015-03-06 16:37:41 +00001900
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001901 err = its_setup_baser(its, baser, cache, shr, psz, order, indirect);
Shanker Donthineni93473592016-06-06 18:17:30 -05001902 if (err < 0) {
1903 its_free_tables(its);
1904 return err;
Robert Richter30f21362015-09-21 22:58:34 +02001905 }
1906
Shanker Donthineni93473592016-06-06 18:17:30 -05001907 /* Update settings which will be used for next BASERn */
1908 psz = baser->psz;
1909 cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
1910 shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001911 }
1912
1913 return 0;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001914}
1915
1916static int its_alloc_collections(struct its_node *its)
1917{
Marc Zyngier83559b42018-06-22 10:52:52 +01001918 int i;
1919
Kees Cook6396bb22018-06-12 14:03:40 -07001920 its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001921 GFP_KERNEL);
1922 if (!its->collections)
1923 return -ENOMEM;
1924
Marc Zyngier83559b42018-06-22 10:52:52 +01001925 for (i = 0; i < nr_cpu_ids; i++)
1926 its->collections[i].target_address = ~0ULL;
1927
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001928 return 0;
1929}
1930
Marc Zyngier7c297a22016-12-19 18:34:38 +00001931static struct page *its_allocate_pending_table(gfp_t gfp_flags)
1932{
1933 struct page *pend_page;
Marc Zyngieradaab502018-07-17 18:06:39 +01001934
Marc Zyngier7c297a22016-12-19 18:34:38 +00001935 pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
Marc Zyngieradaab502018-07-17 18:06:39 +01001936 get_order(LPI_PENDBASE_SZ));
Marc Zyngier7c297a22016-12-19 18:34:38 +00001937 if (!pend_page)
1938 return NULL;
1939
1940 /* Make sure the GIC will observe the zero-ed page */
1941 gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);
1942
1943 return pend_page;
1944}
1945
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00001946static void its_free_pending_table(struct page *pt)
1947{
Marc Zyngieradaab502018-07-17 18:06:39 +01001948 free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ));
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00001949}
1950
Marc Zyngier11e37d32018-07-27 13:38:54 +01001951static int __init allocate_lpi_tables(void)
1952{
1953 int err, cpu;
1954
1955 err = its_setup_lpi_prop_table();
1956 if (err)
1957 return err;
1958
1959 /*
1960 * We allocate all the pending tables anyway, as we may have a
1961 * mix of RDs that have had LPIs enabled, and some that
1962 * don't. We'll free the unused ones as each CPU comes online.
1963 */
1964 for_each_possible_cpu(cpu) {
1965 struct page *pend_page;
1966
1967 pend_page = its_allocate_pending_table(GFP_NOWAIT);
1968 if (!pend_page) {
1969 pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu);
1970 return -ENOMEM;
1971 }
1972
1973 gic_data_rdist_cpu(cpu)->pend_page = pend_page;
1974 }
1975
1976 return 0;
1977}
1978
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001979static void its_cpu_init_lpis(void)
1980{
1981 void __iomem *rbase = gic_data_rdist_rd_base();
1982 struct page *pend_page;
Marc Zyngier11e37d32018-07-27 13:38:54 +01001983 phys_addr_t paddr;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001984 u64 val, tmp;
1985
Marc Zyngier11e37d32018-07-27 13:38:54 +01001986 if (gic_data_rdist()->lpi_enabled)
1987 return;
1988
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001989 pend_page = gic_data_rdist()->pend_page;
Marc Zyngier11e37d32018-07-27 13:38:54 +01001990 paddr = page_to_phys(pend_page);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001991
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001992 /* set PROPBASE */
Marc Zyngiere1a2e202018-07-27 14:36:00 +01001993 val = (gic_rdists->prop_table_pa |
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001994 GICR_PROPBASER_InnerShareable |
Shanker Donthineni2fd632a2017-01-25 21:51:41 -06001995 GICR_PROPBASER_RaWaWb |
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001996 ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
1997
Vladimir Murzin0968a612016-11-02 11:54:06 +00001998 gicr_write_propbaser(val, rbase + GICR_PROPBASER);
1999 tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002000
2001 if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
Marc Zyngier241a3862015-03-27 14:15:05 +00002002 if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
2003 /*
2004 * The HW reports non-shareable, we must
2005 * remove the cacheability attributes as
2006 * well.
2007 */
2008 val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
2009 GICR_PROPBASER_CACHEABILITY_MASK);
2010 val |= GICR_PROPBASER_nC;
Vladimir Murzin0968a612016-11-02 11:54:06 +00002011 gicr_write_propbaser(val, rbase + GICR_PROPBASER);
Marc Zyngier241a3862015-03-27 14:15:05 +00002012 }
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002013 pr_info_once("GIC: using cache flushing for LPI property table\n");
2014 gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
2015 }
2016
2017 /* set PENDBASE */
2018 val = (page_to_phys(pend_page) |
Marc Zyngier4ad3e362015-03-27 14:15:04 +00002019 GICR_PENDBASER_InnerShareable |
Shanker Donthineni2fd632a2017-01-25 21:51:41 -06002020 GICR_PENDBASER_RaWaWb);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002021
Vladimir Murzin0968a612016-11-02 11:54:06 +00002022 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
2023 tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
Marc Zyngier241a3862015-03-27 14:15:05 +00002024
2025 if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
2026 /*
2027 * The HW reports non-shareable, we must remove the
2028 * cacheability attributes as well.
2029 */
2030 val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
2031 GICR_PENDBASER_CACHEABILITY_MASK);
2032 val |= GICR_PENDBASER_nC;
Vladimir Murzin0968a612016-11-02 11:54:06 +00002033 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
Marc Zyngier241a3862015-03-27 14:15:05 +00002034 }
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002035
2036 /* Enable LPIs */
2037 val = readl_relaxed(rbase + GICR_CTLR);
2038 val |= GICR_CTLR_ENABLE_LPIS;
2039 writel_relaxed(val, rbase + GICR_CTLR);
2040
2041 /* Make sure the GIC has seen the above */
2042 dsb(sy);
Marc Zyngier11e37d32018-07-27 13:38:54 +01002043 gic_data_rdist()->lpi_enabled = true;
2044 pr_info("GICv3: CPU%d: using LPI pending table @%pa\n",
2045 smp_processor_id(),
2046 &paddr);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002047}
2048
Derek Basehore920181c2018-02-28 21:48:20 -08002049static void its_cpu_init_collection(struct its_node *its)
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002050{
Derek Basehore920181c2018-02-28 21:48:20 -08002051 int cpu = smp_processor_id();
2052 u64 target;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002053
Derek Basehore920181c2018-02-28 21:48:20 -08002054 /* avoid cross node collections and its mapping */
2055 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
2056 struct device_node *cpu_node;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002057
Derek Basehore920181c2018-02-28 21:48:20 -08002058 cpu_node = of_get_cpu_node(cpu, NULL);
2059 if (its->numa_node != NUMA_NO_NODE &&
2060 its->numa_node != of_node_to_nid(cpu_node))
2061 return;
2062 }
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002063
Derek Basehore920181c2018-02-28 21:48:20 -08002064 /*
2065 * We now have to bind each collection to its target
2066 * redistributor.
2067 */
2068 if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002069 /*
Derek Basehore920181c2018-02-28 21:48:20 -08002070 * This ITS wants the physical address of the
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002071 * redistributor.
2072 */
Derek Basehore920181c2018-02-28 21:48:20 -08002073 target = gic_data_rdist()->phys_base;
2074 } else {
2075 /* This ITS wants a linear CPU number. */
2076 target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
2077 target = GICR_TYPER_CPU_NUMBER(target) << 16;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002078 }
2079
Derek Basehore920181c2018-02-28 21:48:20 -08002080 /* Perform collection mapping */
2081 its->collections[cpu].target_address = target;
2082 its->collections[cpu].col_id = cpu;
2083
2084 its_send_mapc(its, &its->collections[cpu], 1);
2085 its_send_invall(its, &its->collections[cpu]);
2086}
2087
2088static void its_cpu_init_collections(void)
2089{
2090 struct its_node *its;
2091
Sebastian Andrzej Siewiora8db7452018-07-18 17:42:04 +02002092 raw_spin_lock(&its_lock);
Derek Basehore920181c2018-02-28 21:48:20 -08002093
2094 list_for_each_entry(its, &its_nodes, entry)
2095 its_cpu_init_collection(its);
2096
Sebastian Andrzej Siewiora8db7452018-07-18 17:42:04 +02002097 raw_spin_unlock(&its_lock);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002098}
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002099
2100static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
2101{
2102 struct its_device *its_dev = NULL, *tmp;
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002103 unsigned long flags;
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002104
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002105 raw_spin_lock_irqsave(&its->lock, flags);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002106
2107 list_for_each_entry(tmp, &its->its_device_list, entry) {
2108 if (tmp->device_id == dev_id) {
2109 its_dev = tmp;
2110 break;
2111 }
2112 }
2113
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002114 raw_spin_unlock_irqrestore(&its->lock, flags);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002115
2116 return its_dev;
2117}
2118
Shanker Donthineni466b7d12016-03-09 22:10:49 -06002119static struct its_baser *its_get_baser(struct its_node *its, u32 type)
2120{
2121 int i;
2122
2123 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
2124 if (GITS_BASER_TYPE(its->tables[i].val) == type)
2125 return &its->tables[i];
2126 }
2127
2128 return NULL;
2129}
2130
Marc Zyngier70cc81e2016-12-19 18:53:02 +00002131static bool its_alloc_table_entry(struct its_baser *baser, u32 id)
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002132{
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002133 struct page *page;
2134 u32 esz, idx;
2135 __le64 *table;
2136
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002137 /* Don't allow device id that exceeds single, flat table limit */
2138 esz = GITS_BASER_ENTRY_SIZE(baser->val);
2139 if (!(baser->val & GITS_BASER_INDIRECT))
Marc Zyngier70cc81e2016-12-19 18:53:02 +00002140 return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002141
2142 /* Compute 1st level table index & check if that exceeds table limit */
Marc Zyngier70cc81e2016-12-19 18:53:02 +00002143 idx = id >> ilog2(baser->psz / esz);
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002144 if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
2145 return false;
2146
2147 table = baser->base;
2148
2149 /* Allocate memory for 2nd level table */
2150 if (!table[idx]) {
2151 page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(baser->psz));
2152 if (!page)
2153 return false;
2154
2155 /* Flush Lvl2 table to PoC if hw doesn't support coherency */
2156 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
Vladimir Murzin328191c2016-11-02 11:54:05 +00002157 gic_flush_dcache_to_poc(page_address(page), baser->psz);
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002158
2159 table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
2160
2161 /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
2162 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
Vladimir Murzin328191c2016-11-02 11:54:05 +00002163 gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002164
2165 /* Ensure updated table contents are visible to ITS hardware */
2166 dsb(sy);
2167 }
2168
2169 return true;
2170}
2171
Marc Zyngier70cc81e2016-12-19 18:53:02 +00002172static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
2173{
2174 struct its_baser *baser;
2175
2176 baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
2177
2178 /* Don't allow device id that exceeds ITS hardware limit */
2179 if (!baser)
2180 return (ilog2(dev_id) < its->device_ids);
2181
2182 return its_alloc_table_entry(baser, dev_id);
2183}
2184
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00002185static bool its_alloc_vpe_table(u32 vpe_id)
2186{
2187 struct its_node *its;
2188
2189 /*
2190 * Make sure the L2 tables are allocated on *all* v4 ITSs. We
2191 * could try and only do it on ITSs corresponding to devices
2192 * that have interrupts targeted at this VPE, but the
2193 * complexity becomes crazy (and you have tons of memory
2194 * anyway, right?).
2195 */
2196 list_for_each_entry(its, &its_nodes, entry) {
2197 struct its_baser *baser;
2198
2199 if (!its->is_v4)
2200 continue;
2201
2202 baser = its_get_baser(its, GITS_BASER_TYPE_VCPU);
2203 if (!baser)
2204 return false;
2205
2206 if (!its_alloc_table_entry(baser, vpe_id))
2207 return false;
2208 }
2209
2210 return true;
2211}
2212
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002213static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002214 int nvecs, bool alloc_lpis)
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002215{
2216 struct its_device *dev;
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002217 unsigned long *lpi_map = NULL;
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002218 unsigned long flags;
Marc Zyngier591e5be2015-07-17 10:46:42 +01002219 u16 *col_map = NULL;
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002220 void *itt;
2221 int lpi_base;
2222 int nr_lpis;
Marc Zyngierc8481262014-12-12 10:51:24 +00002223 int nr_ites;
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002224 int sz;
2225
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002226 if (!its_alloc_device_table(its, dev_id))
Shanker Donthineni466b7d12016-03-09 22:10:49 -06002227 return NULL;
2228
Marc Zyngier147c8f32018-05-27 16:39:55 +01002229 if (WARN_ON(!is_power_of_2(nvecs)))
2230 nvecs = roundup_pow_of_two(nvecs);
2231
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002232 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
Marc Zyngierc8481262014-12-12 10:51:24 +00002233 /*
Marc Zyngier147c8f32018-05-27 16:39:55 +01002234 * Even if the device wants a single LPI, the ITT must be
2235 * sized as a power of two (and you need at least one bit...).
Marc Zyngierc8481262014-12-12 10:51:24 +00002236 */
Marc Zyngier147c8f32018-05-27 16:39:55 +01002237 nr_ites = max(2, nvecs);
Marc Zyngierc8481262014-12-12 10:51:24 +00002238 sz = nr_ites * its->ite_size;
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002239 sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
Yun Wu6c834122015-03-06 16:37:46 +00002240 itt = kzalloc(sz, GFP_KERNEL);
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002241 if (alloc_lpis) {
Marc Zyngier38dd7c42018-05-27 17:03:03 +01002242 lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002243 if (lpi_map)
Kees Cook6396bb22018-06-12 14:03:40 -07002244 col_map = kcalloc(nr_lpis, sizeof(*col_map),
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002245 GFP_KERNEL);
2246 } else {
Kees Cook6396bb22018-06-12 14:03:40 -07002247 col_map = kcalloc(nr_ites, sizeof(*col_map), GFP_KERNEL);
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002248 nr_lpis = 0;
2249 lpi_base = 0;
2250 }
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002251
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002252 if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002253 kfree(dev);
2254 kfree(itt);
2255 kfree(lpi_map);
Marc Zyngier591e5be2015-07-17 10:46:42 +01002256 kfree(col_map);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002257 return NULL;
2258 }
2259
Vladimir Murzin328191c2016-11-02 11:54:05 +00002260 gic_flush_dcache_to_poc(itt, sz);
Marc Zyngier5a9a8912015-09-13 12:14:32 +01002261
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002262 dev->its = its;
2263 dev->itt = itt;
Marc Zyngierc8481262014-12-12 10:51:24 +00002264 dev->nr_ites = nr_ites;
Marc Zyngier591e5be2015-07-17 10:46:42 +01002265 dev->event_map.lpi_map = lpi_map;
2266 dev->event_map.col_map = col_map;
2267 dev->event_map.lpi_base = lpi_base;
2268 dev->event_map.nr_lpis = nr_lpis;
Marc Zyngierd011e4e2016-12-20 09:44:41 +00002269 mutex_init(&dev->event_map.vlpi_lock);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002270 dev->device_id = dev_id;
2271 INIT_LIST_HEAD(&dev->entry);
2272
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002273 raw_spin_lock_irqsave(&its->lock, flags);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002274 list_add(&dev->entry, &its->its_device_list);
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002275 raw_spin_unlock_irqrestore(&its->lock, flags);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002276
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002277 /* Map device to its ITT */
2278 its_send_mapd(dev, 1);
2279
2280 return dev;
2281}
2282
2283static void its_free_device(struct its_device *its_dev)
2284{
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002285 unsigned long flags;
2286
2287 raw_spin_lock_irqsave(&its_dev->its->lock, flags);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002288 list_del(&its_dev->entry);
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002289 raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002290 kfree(its_dev->itt);
2291 kfree(its_dev);
2292}
Marc Zyngierb48ac832014-11-24 14:35:16 +00002293
2294static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
2295{
2296 int idx;
2297
Marc Zyngier591e5be2015-07-17 10:46:42 +01002298 idx = find_first_zero_bit(dev->event_map.lpi_map,
2299 dev->event_map.nr_lpis);
2300 if (idx == dev->event_map.nr_lpis)
Marc Zyngierb48ac832014-11-24 14:35:16 +00002301 return -ENOSPC;
2302
Marc Zyngier591e5be2015-07-17 10:46:42 +01002303 *hwirq = dev->event_map.lpi_base + idx;
2304 set_bit(idx, dev->event_map.lpi_map);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002305
Marc Zyngierb48ac832014-11-24 14:35:16 +00002306 return 0;
2307}
2308
Marc Zyngier54456db2015-07-28 14:46:21 +01002309static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
2310 int nvec, msi_alloc_info_t *info)
Marc Zyngiere8137f42015-03-06 16:37:42 +00002311{
Marc Zyngierb48ac832014-11-24 14:35:16 +00002312 struct its_node *its;
Marc Zyngierb48ac832014-11-24 14:35:16 +00002313 struct its_device *its_dev;
Marc Zyngier54456db2015-07-28 14:46:21 +01002314 struct msi_domain_info *msi_info;
2315 u32 dev_id;
Marc Zyngierb48ac832014-11-24 14:35:16 +00002316
Marc Zyngier54456db2015-07-28 14:46:21 +01002317 /*
2318 * We ignore "dev" entierely, and rely on the dev_id that has
2319 * been passed via the scratchpad. This limits this domain's
2320 * usefulness to upper layers that definitely know that they
2321 * are built on top of the ITS.
2322 */
2323 dev_id = info->scratchpad[0].ul;
2324
2325 msi_info = msi_get_domain_info(domain);
2326 its = msi_info->data;
2327
Marc Zyngier20b3d542016-12-20 15:23:22 +00002328 if (!gic_rdists->has_direct_lpi &&
2329 vpe_proxy.dev &&
2330 vpe_proxy.dev->its == its &&
2331 dev_id == vpe_proxy.dev->device_id) {
2332 /* Bad luck. Get yourself a better implementation */
2333 WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n",
2334 dev_id);
2335 return -EINVAL;
2336 }
2337
Marc Zyngierf1304202015-07-28 14:46:18 +01002338 its_dev = its_find_device(its, dev_id);
Marc Zyngiere8137f42015-03-06 16:37:42 +00002339 if (its_dev) {
2340 /*
2341 * We already have seen this ID, probably through
2342 * another alias (PCI bridge of some sort). No need to
2343 * create the device.
2344 */
Marc Zyngierf1304202015-07-28 14:46:18 +01002345 pr_debug("Reusing ITT for devID %x\n", dev_id);
Marc Zyngiere8137f42015-03-06 16:37:42 +00002346 goto out;
2347 }
Marc Zyngierb48ac832014-11-24 14:35:16 +00002348
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002349 its_dev = its_create_device(its, dev_id, nvec, true);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002350 if (!its_dev)
2351 return -ENOMEM;
2352
Marc Zyngierf1304202015-07-28 14:46:18 +01002353 pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
Marc Zyngiere8137f42015-03-06 16:37:42 +00002354out:
Marc Zyngierb48ac832014-11-24 14:35:16 +00002355 info->scratchpad[0].ptr = its_dev;
Marc Zyngierb48ac832014-11-24 14:35:16 +00002356 return 0;
2357}
2358
Marc Zyngier54456db2015-07-28 14:46:21 +01002359static struct msi_domain_ops its_msi_domain_ops = {
2360 .msi_prepare = its_msi_prepare,
2361};
2362
Marc Zyngierb48ac832014-11-24 14:35:16 +00002363static int its_irq_gic_domain_alloc(struct irq_domain *domain,
2364 unsigned int virq,
2365 irq_hw_number_t hwirq)
2366{
Marc Zyngierf833f572015-10-13 12:51:33 +01002367 struct irq_fwspec fwspec;
Marc Zyngierb48ac832014-11-24 14:35:16 +00002368
Marc Zyngierf833f572015-10-13 12:51:33 +01002369 if (irq_domain_get_of_node(domain->parent)) {
2370 fwspec.fwnode = domain->parent->fwnode;
2371 fwspec.param_count = 3;
2372 fwspec.param[0] = GIC_IRQ_TYPE_LPI;
2373 fwspec.param[1] = hwirq;
2374 fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
Tomasz Nowicki3f010cf2016-09-12 20:32:25 +02002375 } else if (is_fwnode_irqchip(domain->parent->fwnode)) {
2376 fwspec.fwnode = domain->parent->fwnode;
2377 fwspec.param_count = 2;
2378 fwspec.param[0] = hwirq;
2379 fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
Marc Zyngierf833f572015-10-13 12:51:33 +01002380 } else {
2381 return -EINVAL;
2382 }
Marc Zyngierb48ac832014-11-24 14:35:16 +00002383
Marc Zyngierf833f572015-10-13 12:51:33 +01002384 return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002385}
2386
2387static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
2388 unsigned int nr_irqs, void *args)
2389{
2390 msi_alloc_info_t *info = args;
2391 struct its_device *its_dev = info->scratchpad[0].ptr;
2392 irq_hw_number_t hwirq;
2393 int err;
2394 int i;
2395
2396 for (i = 0; i < nr_irqs; i++) {
2397 err = its_alloc_device_irq(its_dev, &hwirq);
2398 if (err)
2399 return err;
2400
2401 err = its_irq_gic_domain_alloc(domain, virq + i, hwirq);
2402 if (err)
2403 return err;
2404
2405 irq_domain_set_hwirq_and_chip(domain, virq + i,
2406 hwirq, &its_irq_chip, its_dev);
Marc Zyngier0d224d32017-08-18 09:39:18 +01002407 irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i)));
Marc Zyngierf1304202015-07-28 14:46:18 +01002408 pr_debug("ID:%d pID:%d vID:%d\n",
2409 (int)(hwirq - its_dev->event_map.lpi_base),
2410 (int) hwirq, virq + i);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002411 }
2412
2413 return 0;
2414}
2415
Thomas Gleixner72491642017-09-13 23:29:10 +02002416static int its_irq_domain_activate(struct irq_domain *domain,
Thomas Gleixner702cb0a2017-12-29 16:59:06 +01002417 struct irq_data *d, bool reserve)
Marc Zyngieraca268d2014-12-12 10:51:23 +00002418{
2419 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2420 u32 event = its_get_event_id(d);
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +02002421 const struct cpumask *cpu_mask = cpu_online_mask;
Marc Zyngier0d224d32017-08-18 09:39:18 +01002422 int cpu;
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +02002423
2424 /* get the cpu_mask of local node */
2425 if (its_dev->its->numa_node >= 0)
2426 cpu_mask = cpumask_of_node(its_dev->its->numa_node);
Marc Zyngieraca268d2014-12-12 10:51:23 +00002427
Marc Zyngier591e5be2015-07-17 10:46:42 +01002428 /* Bind the LPI to the first possible CPU */
Yang Yingliangc1797b12018-06-22 10:52:51 +01002429 cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
2430 if (cpu >= nr_cpu_ids) {
2431 if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144)
2432 return -EINVAL;
2433
2434 cpu = cpumask_first(cpu_online_mask);
2435 }
2436
Marc Zyngier0d224d32017-08-18 09:39:18 +01002437 its_dev->event_map.col_map[event] = cpu;
2438 irq_data_update_effective_affinity(d, cpumask_of(cpu));
Marc Zyngier591e5be2015-07-17 10:46:42 +01002439
Marc Zyngieraca268d2014-12-12 10:51:23 +00002440 /* Map the GIC IRQ and event to the device */
Marc Zyngier6a25ad32016-12-20 15:52:26 +00002441 its_send_mapti(its_dev, d->hwirq, event);
Thomas Gleixner72491642017-09-13 23:29:10 +02002442 return 0;
Marc Zyngieraca268d2014-12-12 10:51:23 +00002443}
2444
2445static void its_irq_domain_deactivate(struct irq_domain *domain,
2446 struct irq_data *d)
2447{
2448 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2449 u32 event = its_get_event_id(d);
2450
2451 /* Stop the delivery of interrupts */
2452 its_send_discard(its_dev, event);
2453}
2454
Marc Zyngierb48ac832014-11-24 14:35:16 +00002455static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
2456 unsigned int nr_irqs)
2457{
2458 struct irq_data *d = irq_domain_get_irq_data(domain, virq);
2459 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2460 int i;
2461
2462 for (i = 0; i < nr_irqs; i++) {
2463 struct irq_data *data = irq_domain_get_irq_data(domain,
2464 virq + i);
Marc Zyngieraca268d2014-12-12 10:51:23 +00002465 u32 event = its_get_event_id(data);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002466
2467 /* Mark interrupt index as unused */
Marc Zyngier591e5be2015-07-17 10:46:42 +01002468 clear_bit(event, its_dev->event_map.lpi_map);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002469
2470 /* Nuke the entry in the domain */
Marc Zyngier2da39942014-12-12 10:51:22 +00002471 irq_domain_reset_irq_data(data);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002472 }
2473
2474 /* If all interrupts have been freed, start mopping the floor */
Marc Zyngier591e5be2015-07-17 10:46:42 +01002475 if (bitmap_empty(its_dev->event_map.lpi_map,
2476 its_dev->event_map.nr_lpis)) {
Marc Zyngier38dd7c42018-05-27 17:03:03 +01002477 its_lpi_free(its_dev->event_map.lpi_map,
2478 its_dev->event_map.lpi_base,
2479 its_dev->event_map.nr_lpis);
Marc Zyngiercf2be8b2016-12-19 18:49:59 +00002480 kfree(its_dev->event_map.col_map);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002481
2482 /* Unmap device/itt */
2483 its_send_mapd(its_dev, 0);
2484 its_free_device(its_dev);
2485 }
2486
2487 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
2488}
2489
2490static const struct irq_domain_ops its_domain_ops = {
2491 .alloc = its_irq_domain_alloc,
2492 .free = its_irq_domain_free,
Marc Zyngieraca268d2014-12-12 10:51:23 +00002493 .activate = its_irq_domain_activate,
2494 .deactivate = its_irq_domain_deactivate,
Marc Zyngierb48ac832014-11-24 14:35:16 +00002495};
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00002496
Marc Zyngier20b3d542016-12-20 15:23:22 +00002497/*
2498 * This is insane.
2499 *
2500 * If a GICv4 doesn't implement Direct LPIs (which is extremely
2501 * likely), the only way to perform an invalidate is to use a fake
2502 * device to issue an INV command, implying that the LPI has first
2503 * been mapped to some event on that device. Since this is not exactly
2504 * cheap, we try to keep that mapping around as long as possible, and
2505 * only issue an UNMAP if we're short on available slots.
2506 *
2507 * Broken by design(tm).
2508 */
2509static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
2510{
2511 /* Already unmapped? */
2512 if (vpe->vpe_proxy_event == -1)
2513 return;
2514
2515 its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event);
2516 vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL;
2517
2518 /*
2519 * We don't track empty slots at all, so let's move the
2520 * next_victim pointer if we can quickly reuse that slot
2521 * instead of nuking an existing entry. Not clear that this is
2522 * always a win though, and this might just generate a ripple
2523 * effect... Let's just hope VPEs don't migrate too often.
2524 */
2525 if (vpe_proxy.vpes[vpe_proxy.next_victim])
2526 vpe_proxy.next_victim = vpe->vpe_proxy_event;
2527
2528 vpe->vpe_proxy_event = -1;
2529}
2530
2531static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
2532{
2533 if (!gic_rdists->has_direct_lpi) {
2534 unsigned long flags;
2535
2536 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2537 its_vpe_db_proxy_unmap_locked(vpe);
2538 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2539 }
2540}
2541
2542static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
2543{
2544 /* Already mapped? */
2545 if (vpe->vpe_proxy_event != -1)
2546 return;
2547
2548 /* This slot was already allocated. Kick the other VPE out. */
2549 if (vpe_proxy.vpes[vpe_proxy.next_victim])
2550 its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]);
2551
2552 /* Map the new VPE instead */
2553 vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
2554 vpe->vpe_proxy_event = vpe_proxy.next_victim;
2555 vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites;
2556
2557 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx;
2558 its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event);
2559}
2560
Marc Zyngier958b90d2017-08-18 16:14:17 +01002561static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
2562{
2563 unsigned long flags;
2564 struct its_collection *target_col;
2565
2566 if (gic_rdists->has_direct_lpi) {
2567 void __iomem *rdbase;
2568
2569 rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base;
2570 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
2571 while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
2572 cpu_relax();
2573
2574 return;
2575 }
2576
2577 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2578
2579 its_vpe_db_proxy_map_locked(vpe);
2580
2581 target_col = &vpe_proxy.dev->its->collections[to];
2582 its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event);
2583 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to;
2584
2585 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2586}
2587
Marc Zyngier3171a472016-12-20 15:17:28 +00002588static int its_vpe_set_affinity(struct irq_data *d,
2589 const struct cpumask *mask_val,
2590 bool force)
2591{
2592 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2593 int cpu = cpumask_first(mask_val);
2594
2595 /*
2596 * Changing affinity is mega expensive, so let's be as lazy as
Marc Zyngier20b3d542016-12-20 15:23:22 +00002597 * we can and only do it if we really have to. Also, if mapped
Marc Zyngier958b90d2017-08-18 16:14:17 +01002598 * into the proxy device, we need to move the doorbell
2599 * interrupt to its new location.
Marc Zyngier3171a472016-12-20 15:17:28 +00002600 */
2601 if (vpe->col_idx != cpu) {
Marc Zyngier958b90d2017-08-18 16:14:17 +01002602 int from = vpe->col_idx;
2603
Marc Zyngier3171a472016-12-20 15:17:28 +00002604 vpe->col_idx = cpu;
2605 its_send_vmovp(vpe);
Marc Zyngier958b90d2017-08-18 16:14:17 +01002606 its_vpe_db_proxy_move(vpe, from, cpu);
Marc Zyngier3171a472016-12-20 15:17:28 +00002607 }
2608
Marc Zyngier44c4c252017-10-19 10:11:34 +01002609 irq_data_update_effective_affinity(d, cpumask_of(cpu));
2610
Marc Zyngier3171a472016-12-20 15:17:28 +00002611 return IRQ_SET_MASK_OK_DONE;
2612}
2613
Marc Zyngiere643d802016-12-20 15:09:31 +00002614static void its_vpe_schedule(struct its_vpe *vpe)
2615{
Robin Murphy50c33092018-02-16 16:57:56 +00002616 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
Marc Zyngiere643d802016-12-20 15:09:31 +00002617 u64 val;
2618
2619 /* Schedule the VPE */
2620 val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
2621 GENMASK_ULL(51, 12);
2622 val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
2623 val |= GICR_VPROPBASER_RaWb;
2624 val |= GICR_VPROPBASER_InnerShareable;
2625 gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2626
2627 val = virt_to_phys(page_address(vpe->vpt_page)) &
2628 GENMASK_ULL(51, 16);
2629 val |= GICR_VPENDBASER_RaWaWb;
2630 val |= GICR_VPENDBASER_NonShareable;
2631 /*
2632 * There is no good way of finding out if the pending table is
2633 * empty as we can race against the doorbell interrupt very
2634 * easily. So in the end, vpe->pending_last is only an
2635 * indication that the vcpu has something pending, not one
2636 * that the pending table is empty. A good implementation
2637 * would be able to read its coarse map pretty quickly anyway,
2638 * making this a tolerable issue.
2639 */
2640 val |= GICR_VPENDBASER_PendingLast;
2641 val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
2642 val |= GICR_VPENDBASER_Valid;
2643 gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
2644}
2645
2646static void its_vpe_deschedule(struct its_vpe *vpe)
2647{
Robin Murphy50c33092018-02-16 16:57:56 +00002648 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
Marc Zyngiere643d802016-12-20 15:09:31 +00002649 u32 count = 1000000; /* 1s! */
2650 bool clean;
2651 u64 val;
2652
2653 /* We're being scheduled out */
2654 val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
2655 val &= ~GICR_VPENDBASER_Valid;
2656 gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
2657
2658 do {
2659 val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
2660 clean = !(val & GICR_VPENDBASER_Dirty);
2661 if (!clean) {
2662 count--;
2663 cpu_relax();
2664 udelay(1);
2665 }
2666 } while (!clean && count);
2667
2668 if (unlikely(!clean && !count)) {
2669 pr_err_ratelimited("ITS virtual pending table not cleaning\n");
2670 vpe->idai = false;
2671 vpe->pending_last = true;
2672 } else {
2673 vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
2674 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
2675 }
2676}
2677
Marc Zyngier40619a22017-10-08 15:16:09 +01002678static void its_vpe_invall(struct its_vpe *vpe)
2679{
2680 struct its_node *its;
2681
2682 list_for_each_entry(its, &its_nodes, entry) {
2683 if (!its->is_v4)
2684 continue;
2685
Marc Zyngier2247e1b2017-10-08 18:50:36 +01002686 if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr])
2687 continue;
2688
Marc Zyngier3c1ccee2017-10-09 13:17:43 +01002689 /*
2690 * Sending a VINVALL to a single ITS is enough, as all
2691 * we need is to reach the redistributors.
2692 */
Marc Zyngier40619a22017-10-08 15:16:09 +01002693 its_send_vinvall(its, vpe);
Marc Zyngier3c1ccee2017-10-09 13:17:43 +01002694 return;
Marc Zyngier40619a22017-10-08 15:16:09 +01002695 }
2696}
2697
Marc Zyngiere643d802016-12-20 15:09:31 +00002698static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
2699{
2700 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2701 struct its_cmd_info *info = vcpu_info;
2702
2703 switch (info->cmd_type) {
2704 case SCHEDULE_VPE:
2705 its_vpe_schedule(vpe);
2706 return 0;
2707
2708 case DESCHEDULE_VPE:
2709 its_vpe_deschedule(vpe);
2710 return 0;
2711
Marc Zyngier5e2f7642016-12-20 15:10:50 +00002712 case INVALL_VPE:
Marc Zyngier40619a22017-10-08 15:16:09 +01002713 its_vpe_invall(vpe);
Marc Zyngier5e2f7642016-12-20 15:10:50 +00002714 return 0;
2715
Marc Zyngiere643d802016-12-20 15:09:31 +00002716 default:
2717 return -EINVAL;
2718 }
2719}
2720
Marc Zyngier20b3d542016-12-20 15:23:22 +00002721static void its_vpe_send_cmd(struct its_vpe *vpe,
2722 void (*cmd)(struct its_device *, u32))
2723{
2724 unsigned long flags;
2725
2726 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2727
2728 its_vpe_db_proxy_map_locked(vpe);
2729 cmd(vpe_proxy.dev, vpe->vpe_proxy_event);
2730
2731 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2732}
2733
Marc Zyngierf6a91da2016-12-20 15:20:38 +00002734static void its_vpe_send_inv(struct irq_data *d)
2735{
2736 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
Marc Zyngierf6a91da2016-12-20 15:20:38 +00002737
Marc Zyngier20b3d542016-12-20 15:23:22 +00002738 if (gic_rdists->has_direct_lpi) {
2739 void __iomem *rdbase;
2740
2741 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
2742 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_INVLPIR);
2743 while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
2744 cpu_relax();
2745 } else {
2746 its_vpe_send_cmd(vpe, its_send_inv);
2747 }
Marc Zyngierf6a91da2016-12-20 15:20:38 +00002748}
2749
2750static void its_vpe_mask_irq(struct irq_data *d)
2751{
2752 /*
2753 * We need to unmask the LPI, which is described by the parent
2754 * irq_data. Instead of calling into the parent (which won't
2755 * exactly do the right thing, let's simply use the
2756 * parent_data pointer. Yes, I'm naughty.
2757 */
2758 lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
2759 its_vpe_send_inv(d);
2760}
2761
2762static void its_vpe_unmask_irq(struct irq_data *d)
2763{
2764 /* Same hack as above... */
2765 lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
2766 its_vpe_send_inv(d);
2767}
2768
Marc Zyngiere57a3e282017-07-31 14:47:24 +01002769static int its_vpe_set_irqchip_state(struct irq_data *d,
2770 enum irqchip_irq_state which,
2771 bool state)
2772{
2773 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2774
2775 if (which != IRQCHIP_STATE_PENDING)
2776 return -EINVAL;
2777
2778 if (gic_rdists->has_direct_lpi) {
2779 void __iomem *rdbase;
2780
2781 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
2782 if (state) {
2783 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
2784 } else {
2785 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
2786 while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
2787 cpu_relax();
2788 }
2789 } else {
2790 if (state)
2791 its_vpe_send_cmd(vpe, its_send_int);
2792 else
2793 its_vpe_send_cmd(vpe, its_send_clear);
2794 }
2795
2796 return 0;
2797}
2798
Marc Zyngier8fff27a2016-12-20 13:41:55 +00002799static struct irq_chip its_vpe_irq_chip = {
2800 .name = "GICv4-vpe",
Marc Zyngierf6a91da2016-12-20 15:20:38 +00002801 .irq_mask = its_vpe_mask_irq,
2802 .irq_unmask = its_vpe_unmask_irq,
2803 .irq_eoi = irq_chip_eoi_parent,
Marc Zyngier3171a472016-12-20 15:17:28 +00002804 .irq_set_affinity = its_vpe_set_affinity,
Marc Zyngiere57a3e282017-07-31 14:47:24 +01002805 .irq_set_irqchip_state = its_vpe_set_irqchip_state,
Marc Zyngiere643d802016-12-20 15:09:31 +00002806 .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity,
Marc Zyngier8fff27a2016-12-20 13:41:55 +00002807};
2808
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00002809static int its_vpe_id_alloc(void)
2810{
Shanker Donthineni32bd44d2017-10-07 15:43:48 -05002811 return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00002812}
2813
2814static void its_vpe_id_free(u16 id)
2815{
2816 ida_simple_remove(&its_vpeid_ida, id);
2817}
2818
2819static int its_vpe_init(struct its_vpe *vpe)
2820{
2821 struct page *vpt_page;
2822 int vpe_id;
2823
2824 /* Allocate vpe_id */
2825 vpe_id = its_vpe_id_alloc();
2826 if (vpe_id < 0)
2827 return vpe_id;
2828
2829 /* Allocate VPT */
2830 vpt_page = its_allocate_pending_table(GFP_KERNEL);
2831 if (!vpt_page) {
2832 its_vpe_id_free(vpe_id);
2833 return -ENOMEM;
2834 }
2835
2836 if (!its_alloc_vpe_table(vpe_id)) {
2837 its_vpe_id_free(vpe_id);
2838 its_free_pending_table(vpe->vpt_page);
2839 return -ENOMEM;
2840 }
2841
2842 vpe->vpe_id = vpe_id;
2843 vpe->vpt_page = vpt_page;
Marc Zyngier20b3d542016-12-20 15:23:22 +00002844 vpe->vpe_proxy_event = -1;
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00002845
2846 return 0;
2847}
2848
2849static void its_vpe_teardown(struct its_vpe *vpe)
2850{
Marc Zyngier20b3d542016-12-20 15:23:22 +00002851 its_vpe_db_proxy_unmap(vpe);
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00002852 its_vpe_id_free(vpe->vpe_id);
2853 its_free_pending_table(vpe->vpt_page);
2854}
2855
2856static void its_vpe_irq_domain_free(struct irq_domain *domain,
2857 unsigned int virq,
2858 unsigned int nr_irqs)
2859{
2860 struct its_vm *vm = domain->host_data;
2861 int i;
2862
2863 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
2864
2865 for (i = 0; i < nr_irqs; i++) {
2866 struct irq_data *data = irq_domain_get_irq_data(domain,
2867 virq + i);
2868 struct its_vpe *vpe = irq_data_get_irq_chip_data(data);
2869
2870 BUG_ON(vm != vpe->its_vm);
2871
2872 clear_bit(data->hwirq, vm->db_bitmap);
2873 its_vpe_teardown(vpe);
2874 irq_domain_reset_irq_data(data);
2875 }
2876
2877 if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) {
Marc Zyngier38dd7c42018-05-27 17:03:03 +01002878 its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00002879 its_free_prop_table(vm->vprop_page);
2880 }
2881}
2882
2883static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
2884 unsigned int nr_irqs, void *args)
2885{
2886 struct its_vm *vm = args;
2887 unsigned long *bitmap;
2888 struct page *vprop_page;
2889 int base, nr_ids, i, err = 0;
2890
2891 BUG_ON(!vm);
2892
Marc Zyngier38dd7c42018-05-27 17:03:03 +01002893 bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids);
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00002894 if (!bitmap)
2895 return -ENOMEM;
2896
2897 if (nr_ids < nr_irqs) {
Marc Zyngier38dd7c42018-05-27 17:03:03 +01002898 its_lpi_free(bitmap, base, nr_ids);
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00002899 return -ENOMEM;
2900 }
2901
2902 vprop_page = its_allocate_prop_table(GFP_KERNEL);
2903 if (!vprop_page) {
Marc Zyngier38dd7c42018-05-27 17:03:03 +01002904 its_lpi_free(bitmap, base, nr_ids);
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00002905 return -ENOMEM;
2906 }
2907
2908 vm->db_bitmap = bitmap;
2909 vm->db_lpi_base = base;
2910 vm->nr_db_lpis = nr_ids;
2911 vm->vprop_page = vprop_page;
2912
2913 for (i = 0; i < nr_irqs; i++) {
2914 vm->vpes[i]->vpe_db_lpi = base + i;
2915 err = its_vpe_init(vm->vpes[i]);
2916 if (err)
2917 break;
2918 err = its_irq_gic_domain_alloc(domain, virq + i,
2919 vm->vpes[i]->vpe_db_lpi);
2920 if (err)
2921 break;
2922 irq_domain_set_hwirq_and_chip(domain, virq + i, i,
2923 &its_vpe_irq_chip, vm->vpes[i]);
2924 set_bit(i, bitmap);
2925 }
2926
2927 if (err) {
2928 if (i > 0)
2929 its_vpe_irq_domain_free(domain, virq, i - 1);
2930
Marc Zyngier38dd7c42018-05-27 17:03:03 +01002931 its_lpi_free(bitmap, base, nr_ids);
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00002932 its_free_prop_table(vprop_page);
2933 }
2934
2935 return err;
2936}
2937
Thomas Gleixner72491642017-09-13 23:29:10 +02002938static int its_vpe_irq_domain_activate(struct irq_domain *domain,
Thomas Gleixner702cb0a2017-12-29 16:59:06 +01002939 struct irq_data *d, bool reserve)
Marc Zyngiereb781922016-12-20 14:47:05 +00002940{
2941 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
Marc Zyngier40619a22017-10-08 15:16:09 +01002942 struct its_node *its;
Marc Zyngiereb781922016-12-20 14:47:05 +00002943
Marc Zyngier2247e1b2017-10-08 18:50:36 +01002944 /* If we use the list map, we issue VMAPP on demand... */
2945 if (its_list_map)
Marc Zyngier6ef930f2017-11-07 10:04:38 +00002946 return 0;
Marc Zyngiereb781922016-12-20 14:47:05 +00002947
2948 /* Map the VPE to the first possible CPU */
2949 vpe->col_idx = cpumask_first(cpu_online_mask);
Marc Zyngier40619a22017-10-08 15:16:09 +01002950
2951 list_for_each_entry(its, &its_nodes, entry) {
2952 if (!its->is_v4)
2953 continue;
2954
Marc Zyngier75fd9512017-10-08 18:46:39 +01002955 its_send_vmapp(its, vpe, true);
Marc Zyngier40619a22017-10-08 15:16:09 +01002956 its_send_vinvall(its, vpe);
2957 }
2958
Marc Zyngier44c4c252017-10-19 10:11:34 +01002959 irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
2960
Thomas Gleixner72491642017-09-13 23:29:10 +02002961 return 0;
Marc Zyngiereb781922016-12-20 14:47:05 +00002962}
2963
2964static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
2965 struct irq_data *d)
2966{
2967 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
Marc Zyngier75fd9512017-10-08 18:46:39 +01002968 struct its_node *its;
Marc Zyngiereb781922016-12-20 14:47:05 +00002969
Marc Zyngier2247e1b2017-10-08 18:50:36 +01002970 /*
2971 * If we use the list map, we unmap the VPE once no VLPIs are
2972 * associated with the VM.
2973 */
2974 if (its_list_map)
2975 return;
2976
Marc Zyngier75fd9512017-10-08 18:46:39 +01002977 list_for_each_entry(its, &its_nodes, entry) {
2978 if (!its->is_v4)
2979 continue;
2980
2981 its_send_vmapp(its, vpe, false);
2982 }
Marc Zyngiereb781922016-12-20 14:47:05 +00002983}
2984
Marc Zyngier8fff27a2016-12-20 13:41:55 +00002985static const struct irq_domain_ops its_vpe_domain_ops = {
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00002986 .alloc = its_vpe_irq_domain_alloc,
2987 .free = its_vpe_irq_domain_free,
Marc Zyngiereb781922016-12-20 14:47:05 +00002988 .activate = its_vpe_irq_domain_activate,
2989 .deactivate = its_vpe_irq_domain_deactivate,
Marc Zyngier8fff27a2016-12-20 13:41:55 +00002990};
2991
Yun Wu4559fbb2015-03-06 16:37:50 +00002992static int its_force_quiescent(void __iomem *base)
2993{
2994 u32 count = 1000000; /* 1s */
2995 u32 val;
2996
2997 val = readl_relaxed(base + GITS_CTLR);
David Daney7611da82016-08-18 15:41:58 -07002998 /*
2999 * GIC architecture specification requires the ITS to be both
3000 * disabled and quiescent for writes to GITS_BASER<n> or
3001 * GITS_CBASER to not have UNPREDICTABLE results.
3002 */
3003 if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE))
Yun Wu4559fbb2015-03-06 16:37:50 +00003004 return 0;
3005
3006 /* Disable the generation of all interrupts to this ITS */
Marc Zyngierd51c4b42017-06-27 21:24:25 +01003007 val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe);
Yun Wu4559fbb2015-03-06 16:37:50 +00003008 writel_relaxed(val, base + GITS_CTLR);
3009
3010 /* Poll GITS_CTLR and wait until ITS becomes quiescent */
3011 while (1) {
3012 val = readl_relaxed(base + GITS_CTLR);
3013 if (val & GITS_CTLR_QUIESCENT)
3014 return 0;
3015
3016 count--;
3017 if (!count)
3018 return -EBUSY;
3019
3020 cpu_relax();
3021 udelay(1);
3022 }
3023}
3024
Ard Biesheuvel9d111d42017-10-17 17:55:55 +01003025static bool __maybe_unused its_enable_quirk_cavium_22375(void *data)
Robert Richter94100972015-09-21 22:58:38 +02003026{
3027 struct its_node *its = data;
3028
Ard Biesheuvelfa150012017-10-17 17:55:54 +01003029 /* erratum 22375: only alloc 8MB table size */
3030 its->device_ids = 0x14; /* 20 bits, 8MB */
Robert Richter94100972015-09-21 22:58:38 +02003031 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
Ard Biesheuvel9d111d42017-10-17 17:55:55 +01003032
3033 return true;
Robert Richter94100972015-09-21 22:58:38 +02003034}
3035
Ard Biesheuvel9d111d42017-10-17 17:55:55 +01003036static bool __maybe_unused its_enable_quirk_cavium_23144(void *data)
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +02003037{
3038 struct its_node *its = data;
3039
3040 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
Ard Biesheuvel9d111d42017-10-17 17:55:55 +01003041
3042 return true;
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +02003043}
3044
Ard Biesheuvel9d111d42017-10-17 17:55:55 +01003045static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
Shanker Donthineni90922a22017-03-07 08:20:38 -06003046{
3047 struct its_node *its = data;
3048
3049 /* On QDF2400, the size of the ITE is 16Bytes */
3050 its->ite_size = 16;
Ard Biesheuvel9d111d42017-10-17 17:55:55 +01003051
3052 return true;
Shanker Donthineni90922a22017-03-07 08:20:38 -06003053}
3054
Ard Biesheuvel558b0162017-10-17 17:55:56 +01003055static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev)
3056{
3057 struct its_node *its = its_dev->its;
3058
3059 /*
3060 * The Socionext Synquacer SoC has a so-called 'pre-ITS',
3061 * which maps 32-bit writes targeted at a separate window of
3062 * size '4 << device_id_bits' onto writes to GITS_TRANSLATER
3063 * with device ID taken from bits [device_id_bits + 1:2] of
3064 * the window offset.
3065 */
3066 return its->pre_its_base + (its_dev->device_id << 2);
3067}
3068
3069static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data)
3070{
3071 struct its_node *its = data;
3072 u32 pre_its_window[2];
3073 u32 ids;
3074
3075 if (!fwnode_property_read_u32_array(its->fwnode_handle,
3076 "socionext,synquacer-pre-its",
3077 pre_its_window,
3078 ARRAY_SIZE(pre_its_window))) {
3079
3080 its->pre_its_base = pre_its_window[0];
3081 its->get_msi_base = its_irq_get_msi_base_pre_its;
3082
3083 ids = ilog2(pre_its_window[1]) - 2;
3084 if (its->device_ids > ids)
3085 its->device_ids = ids;
3086
3087 /* the pre-ITS breaks isolation, so disable MSI remapping */
3088 its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP;
3089 return true;
3090 }
3091 return false;
3092}
3093
Marc Zyngier5c9a8822017-07-28 21:20:37 +01003094static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data)
3095{
3096 struct its_node *its = data;
3097
3098 /*
3099 * Hip07 insists on using the wrong address for the VLPI
3100 * page. Trick it into doing the right thing...
3101 */
3102 its->vlpi_redist_offset = SZ_128K;
3103 return true;
Marc Zyngiercc2d3212014-11-24 14:35:11 +00003104}
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003105
Robert Richter67510cc2015-09-21 22:58:37 +02003106static const struct gic_quirk its_quirks[] = {
Robert Richter94100972015-09-21 22:58:38 +02003107#ifdef CONFIG_CAVIUM_ERRATUM_22375
3108 {
3109 .desc = "ITS: Cavium errata 22375, 24313",
3110 .iidr = 0xa100034c, /* ThunderX pass 1.x */
3111 .mask = 0xffff0fff,
3112 .init = its_enable_quirk_cavium_22375,
3113 },
3114#endif
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +02003115#ifdef CONFIG_CAVIUM_ERRATUM_23144
3116 {
3117 .desc = "ITS: Cavium erratum 23144",
3118 .iidr = 0xa100034c, /* ThunderX pass 1.x */
3119 .mask = 0xffff0fff,
3120 .init = its_enable_quirk_cavium_23144,
3121 },
3122#endif
Shanker Donthineni90922a22017-03-07 08:20:38 -06003123#ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065
3124 {
3125 .desc = "ITS: QDF2400 erratum 0065",
3126 .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */
3127 .mask = 0xffffffff,
3128 .init = its_enable_quirk_qdf2400_e0065,
3129 },
3130#endif
Ard Biesheuvel558b0162017-10-17 17:55:56 +01003131#ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS
3132 {
3133 /*
3134 * The Socionext Synquacer SoC incorporates ARM's own GIC-500
3135 * implementation, but with a 'pre-ITS' added that requires
3136 * special handling in software.
3137 */
3138 .desc = "ITS: Socionext Synquacer pre-ITS",
3139 .iidr = 0x0001143b,
3140 .mask = 0xffffffff,
3141 .init = its_enable_quirk_socionext_synquacer,
3142 },
3143#endif
Marc Zyngier5c9a8822017-07-28 21:20:37 +01003144#ifdef CONFIG_HISILICON_ERRATUM_161600802
3145 {
3146 .desc = "ITS: Hip07 erratum 161600802",
3147 .iidr = 0x00000004,
3148 .mask = 0xffffffff,
3149 .init = its_enable_quirk_hip07_161600802,
3150 },
3151#endif
Robert Richter67510cc2015-09-21 22:58:37 +02003152 {
3153 }
3154};
3155
3156static void its_enable_quirks(struct its_node *its)
3157{
3158 u32 iidr = readl_relaxed(its->base + GITS_IIDR);
3159
3160 gic_enable_quirks(iidr, its_quirks, its);
3161}
3162
Derek Basehoredba0bc72018-02-28 21:48:18 -08003163static int its_save_disable(void)
3164{
3165 struct its_node *its;
3166 int err = 0;
3167
Sebastian Andrzej Siewiora8db7452018-07-18 17:42:04 +02003168 raw_spin_lock(&its_lock);
Derek Basehoredba0bc72018-02-28 21:48:18 -08003169 list_for_each_entry(its, &its_nodes, entry) {
3170 void __iomem *base;
3171
3172 if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
3173 continue;
3174
3175 base = its->base;
3176 its->ctlr_save = readl_relaxed(base + GITS_CTLR);
3177 err = its_force_quiescent(base);
3178 if (err) {
3179 pr_err("ITS@%pa: failed to quiesce: %d\n",
3180 &its->phys_base, err);
3181 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
3182 goto err;
3183 }
3184
3185 its->cbaser_save = gits_read_cbaser(base + GITS_CBASER);
3186 }
3187
3188err:
3189 if (err) {
3190 list_for_each_entry_continue_reverse(its, &its_nodes, entry) {
3191 void __iomem *base;
3192
3193 if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
3194 continue;
3195
3196 base = its->base;
3197 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
3198 }
3199 }
Sebastian Andrzej Siewiora8db7452018-07-18 17:42:04 +02003200 raw_spin_unlock(&its_lock);
Derek Basehoredba0bc72018-02-28 21:48:18 -08003201
3202 return err;
3203}
3204
3205static void its_restore_enable(void)
3206{
3207 struct its_node *its;
3208 int ret;
3209
Sebastian Andrzej Siewiora8db7452018-07-18 17:42:04 +02003210 raw_spin_lock(&its_lock);
Derek Basehoredba0bc72018-02-28 21:48:18 -08003211 list_for_each_entry(its, &its_nodes, entry) {
3212 void __iomem *base;
3213 int i;
3214
3215 if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
3216 continue;
3217
3218 base = its->base;
3219
3220 /*
3221 * Make sure that the ITS is disabled. If it fails to quiesce,
3222 * don't restore it since writing to CBASER or BASER<n>
3223 * registers is undefined according to the GIC v3 ITS
3224 * Specification.
3225 */
3226 ret = its_force_quiescent(base);
3227 if (ret) {
3228 pr_err("ITS@%pa: failed to quiesce on resume: %d\n",
3229 &its->phys_base, ret);
3230 continue;
3231 }
3232
3233 gits_write_cbaser(its->cbaser_save, base + GITS_CBASER);
3234
3235 /*
3236 * Writing CBASER resets CREADR to 0, so make CWRITER and
3237 * cmd_write line up with it.
3238 */
3239 its->cmd_write = its->cmd_base;
3240 gits_write_cwriter(0, base + GITS_CWRITER);
3241
3242 /* Restore GITS_BASER from the value cache. */
3243 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
3244 struct its_baser *baser = &its->tables[i];
3245
3246 if (!(baser->val & GITS_BASER_VALID))
3247 continue;
3248
3249 its_write_baser(its, baser, baser->val);
3250 }
3251 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
Derek Basehore920181c2018-02-28 21:48:20 -08003252
3253 /*
3254 * Reinit the collection if it's stored in the ITS. This is
3255 * indicated by the col_id being less than the HCC field.
3256 * CID < HCC as specified in the GIC v3 Documentation.
3257 */
3258 if (its->collections[smp_processor_id()].col_id <
3259 GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER)))
3260 its_cpu_init_collection(its);
Derek Basehoredba0bc72018-02-28 21:48:18 -08003261 }
Sebastian Andrzej Siewiora8db7452018-07-18 17:42:04 +02003262 raw_spin_unlock(&its_lock);
Derek Basehoredba0bc72018-02-28 21:48:18 -08003263}
3264
3265static struct syscore_ops its_syscore_ops = {
3266 .suspend = its_save_disable,
3267 .resume = its_restore_enable,
3268};
3269
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003270static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
Tomasz Nowickid14ae5e2016-09-12 20:32:23 +02003271{
3272 struct irq_domain *inner_domain;
3273 struct msi_domain_info *info;
3274
3275 info = kzalloc(sizeof(*info), GFP_KERNEL);
3276 if (!info)
3277 return -ENOMEM;
3278
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003279 inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its);
Tomasz Nowickid14ae5e2016-09-12 20:32:23 +02003280 if (!inner_domain) {
3281 kfree(info);
3282 return -ENOMEM;
3283 }
3284
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003285 inner_domain->parent = its_parent;
Marc Zyngier96f0d932017-06-22 11:42:50 +01003286 irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
Ard Biesheuvel558b0162017-10-17 17:55:56 +01003287 inner_domain->flags |= its->msi_domain_flags;
Tomasz Nowickid14ae5e2016-09-12 20:32:23 +02003288 info->ops = &its_msi_domain_ops;
3289 info->data = its;
3290 inner_domain->host_data = info;
3291
3292 return 0;
3293}
3294
Marc Zyngier8fff27a2016-12-20 13:41:55 +00003295static int its_init_vpe_domain(void)
3296{
Marc Zyngier20b3d542016-12-20 15:23:22 +00003297 struct its_node *its;
3298 u32 devid;
3299 int entries;
3300
3301 if (gic_rdists->has_direct_lpi) {
3302 pr_info("ITS: Using DirectLPI for VPE invalidation\n");
3303 return 0;
3304 }
3305
3306 /* Any ITS will do, even if not v4 */
3307 its = list_first_entry(&its_nodes, struct its_node, entry);
3308
3309 entries = roundup_pow_of_two(nr_cpu_ids);
Kees Cook6396bb22018-06-12 14:03:40 -07003310 vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes),
Marc Zyngier20b3d542016-12-20 15:23:22 +00003311 GFP_KERNEL);
3312 if (!vpe_proxy.vpes) {
3313 pr_err("ITS: Can't allocate GICv4 proxy device array\n");
3314 return -ENOMEM;
3315 }
3316
3317 /* Use the last possible DevID */
3318 devid = GENMASK(its->device_ids - 1, 0);
3319 vpe_proxy.dev = its_create_device(its, devid, entries, false);
3320 if (!vpe_proxy.dev) {
3321 kfree(vpe_proxy.vpes);
3322 pr_err("ITS: Can't allocate GICv4 proxy device\n");
3323 return -ENOMEM;
3324 }
3325
Shanker Donthinenic427a472017-09-23 13:50:19 -05003326 BUG_ON(entries > vpe_proxy.dev->nr_ites);
Marc Zyngier20b3d542016-12-20 15:23:22 +00003327
3328 raw_spin_lock_init(&vpe_proxy.lock);
3329 vpe_proxy.next_victim = 0;
3330 pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n",
3331 devid, vpe_proxy.dev->nr_ites);
3332
Marc Zyngier8fff27a2016-12-20 13:41:55 +00003333 return 0;
3334}
3335
Marc Zyngier3dfa5762016-12-19 17:25:54 +00003336static int __init its_compute_its_list_map(struct resource *res,
3337 void __iomem *its_base)
3338{
3339 int its_number;
3340 u32 ctlr;
3341
3342 /*
3343 * This is assumed to be done early enough that we're
3344 * guaranteed to be single-threaded, hence no
3345 * locking. Should this change, we should address
3346 * this.
3347 */
Marc Zyngierab604912017-10-08 18:48:06 +01003348 its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX);
3349 if (its_number >= GICv4_ITS_LIST_MAX) {
Marc Zyngier3dfa5762016-12-19 17:25:54 +00003350 pr_err("ITS@%pa: No ITSList entry available!\n",
3351 &res->start);
3352 return -EINVAL;
3353 }
3354
3355 ctlr = readl_relaxed(its_base + GITS_CTLR);
3356 ctlr &= ~GITS_CTLR_ITS_NUMBER;
3357 ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT;
3358 writel_relaxed(ctlr, its_base + GITS_CTLR);
3359 ctlr = readl_relaxed(its_base + GITS_CTLR);
3360 if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) {
3361 its_number = ctlr & GITS_CTLR_ITS_NUMBER;
3362 its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT;
3363 }
3364
3365 if (test_and_set_bit(its_number, &its_list_map)) {
3366 pr_err("ITS@%pa: Duplicate ITSList entry %d\n",
3367 &res->start, its_number);
3368 return -EINVAL;
3369 }
3370
3371 return its_number;
3372}
3373
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003374static int __init its_probe_one(struct resource *res,
3375 struct fwnode_handle *handle, int numa_node)
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003376{
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003377 struct its_node *its;
3378 void __iomem *its_base;
Marc Zyngier3dfa5762016-12-19 17:25:54 +00003379 u32 val, ctlr;
3380 u64 baser, tmp, typer;
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003381 int err;
3382
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003383 its_base = ioremap(res->start, resource_size(res));
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003384 if (!its_base) {
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003385 pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003386 return -ENOMEM;
3387 }
3388
3389 val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
3390 if (val != 0x30 && val != 0x40) {
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003391 pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003392 err = -ENODEV;
3393 goto out_unmap;
3394 }
3395
Yun Wu4559fbb2015-03-06 16:37:50 +00003396 err = its_force_quiescent(its_base);
3397 if (err) {
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003398 pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start);
Yun Wu4559fbb2015-03-06 16:37:50 +00003399 goto out_unmap;
3400 }
3401
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003402 pr_info("ITS %pR\n", res);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003403
3404 its = kzalloc(sizeof(*its), GFP_KERNEL);
3405 if (!its) {
3406 err = -ENOMEM;
3407 goto out_unmap;
3408 }
3409
3410 raw_spin_lock_init(&its->lock);
3411 INIT_LIST_HEAD(&its->entry);
3412 INIT_LIST_HEAD(&its->its_device_list);
Marc Zyngier3dfa5762016-12-19 17:25:54 +00003413 typer = gic_read_typer(its_base + GITS_TYPER);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003414 its->base = its_base;
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003415 its->phys_base = res->start;
Marc Zyngier3dfa5762016-12-19 17:25:54 +00003416 its->ite_size = GITS_TYPER_ITT_ENTRY_SIZE(typer);
Ard Biesheuvelfa150012017-10-17 17:55:54 +01003417 its->device_ids = GITS_TYPER_DEVBITS(typer);
Marc Zyngier3dfa5762016-12-19 17:25:54 +00003418 its->is_v4 = !!(typer & GITS_TYPER_VLPIS);
3419 if (its->is_v4) {
3420 if (!(typer & GITS_TYPER_VMOVP)) {
3421 err = its_compute_its_list_map(res, its_base);
3422 if (err < 0)
3423 goto out_free_its;
3424
Marc Zyngierdebf6d02017-10-08 18:44:42 +01003425 its->list_nr = err;
3426
Marc Zyngier3dfa5762016-12-19 17:25:54 +00003427 pr_info("ITS@%pa: Using ITS number %d\n",
3428 &res->start, err);
3429 } else {
3430 pr_info("ITS@%pa: Single VMOVP capable\n", &res->start);
3431 }
3432 }
3433
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003434 its->numa_node = numa_node;
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003435
Robert Richter5bc13c22017-02-01 18:38:25 +01003436 its->cmd_base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
3437 get_order(ITS_CMD_QUEUE_SZ));
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003438 if (!its->cmd_base) {
3439 err = -ENOMEM;
3440 goto out_free_its;
3441 }
3442 its->cmd_write = its->cmd_base;
Ard Biesheuvel558b0162017-10-17 17:55:56 +01003443 its->fwnode_handle = handle;
3444 its->get_msi_base = its_irq_get_msi_base;
3445 its->msi_domain_flags = IRQ_DOMAIN_FLAG_MSI_REMAP;
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003446
Robert Richter67510cc2015-09-21 22:58:37 +02003447 its_enable_quirks(its);
3448
Shanker Donthineni0e0b0f62016-06-06 18:17:31 -05003449 err = its_alloc_tables(its);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003450 if (err)
3451 goto out_free_cmd;
3452
3453 err = its_alloc_collections(its);
3454 if (err)
3455 goto out_free_tables;
3456
3457 baser = (virt_to_phys(its->cmd_base) |
Shanker Donthineni2fd632a2017-01-25 21:51:41 -06003458 GITS_CBASER_RaWaWb |
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003459 GITS_CBASER_InnerShareable |
3460 (ITS_CMD_QUEUE_SZ / SZ_4K - 1) |
3461 GITS_CBASER_VALID);
3462
Vladimir Murzin0968a612016-11-02 11:54:06 +00003463 gits_write_cbaser(baser, its->base + GITS_CBASER);
3464 tmp = gits_read_cbaser(its->base + GITS_CBASER);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003465
Marc Zyngier4ad3e362015-03-27 14:15:04 +00003466 if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
Marc Zyngier241a3862015-03-27 14:15:05 +00003467 if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
3468 /*
3469 * The HW reports non-shareable, we must
3470 * remove the cacheability attributes as
3471 * well.
3472 */
3473 baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
3474 GITS_CBASER_CACHEABILITY_MASK);
3475 baser |= GITS_CBASER_nC;
Vladimir Murzin0968a612016-11-02 11:54:06 +00003476 gits_write_cbaser(baser, its->base + GITS_CBASER);
Marc Zyngier241a3862015-03-27 14:15:05 +00003477 }
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003478 pr_info("ITS: using cache flushing for cmd queue\n");
3479 its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
3480 }
3481
Vladimir Murzin0968a612016-11-02 11:54:06 +00003482 gits_write_cwriter(0, its->base + GITS_CWRITER);
Marc Zyngier3dfa5762016-12-19 17:25:54 +00003483 ctlr = readl_relaxed(its->base + GITS_CTLR);
Marc Zyngierd51c4b42017-06-27 21:24:25 +01003484 ctlr |= GITS_CTLR_ENABLE;
3485 if (its->is_v4)
3486 ctlr |= GITS_CTLR_ImDe;
3487 writel_relaxed(ctlr, its->base + GITS_CTLR);
Marc Zyngier241a3862015-03-27 14:15:05 +00003488
Derek Basehoredba0bc72018-02-28 21:48:18 -08003489 if (GITS_TYPER_HCC(typer))
3490 its->flags |= ITS_FLAGS_SAVE_SUSPEND_STATE;
3491
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003492 err = its_init_domain(handle, its);
Tomasz Nowickid14ae5e2016-09-12 20:32:23 +02003493 if (err)
3494 goto out_free_tables;
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003495
Sebastian Andrzej Siewiora8db7452018-07-18 17:42:04 +02003496 raw_spin_lock(&its_lock);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003497 list_add(&its->entry, &its_nodes);
Sebastian Andrzej Siewiora8db7452018-07-18 17:42:04 +02003498 raw_spin_unlock(&its_lock);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003499
3500 return 0;
3501
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003502out_free_tables:
3503 its_free_tables(its);
3504out_free_cmd:
Robert Richter5bc13c22017-02-01 18:38:25 +01003505 free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003506out_free_its:
3507 kfree(its);
3508out_unmap:
3509 iounmap(its_base);
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003510 pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003511 return err;
3512}
3513
3514static bool gic_rdists_supports_plpis(void)
3515{
Marc Zyngier589ce5f2016-10-14 15:13:07 +01003516 return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003517}
3518
Shanker Donthineni6eb486b2018-03-21 20:58:49 -05003519static int redist_disable_lpis(void)
3520{
3521 void __iomem *rbase = gic_data_rdist_rd_base();
3522 u64 timeout = USEC_PER_SEC;
3523 u64 val;
3524
3525 if (!gic_rdists_supports_plpis()) {
3526 pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
3527 return -ENXIO;
3528 }
3529
3530 val = readl_relaxed(rbase + GICR_CTLR);
3531 if (!(val & GICR_CTLR_ENABLE_LPIS))
3532 return 0;
3533
Marc Zyngier11e37d32018-07-27 13:38:54 +01003534 /*
3535 * If coming via a CPU hotplug event, we don't need to disable
3536 * LPIs before trying to re-enable them. They are already
3537 * configured and all is well in the world.
3538 */
3539 if (gic_data_rdist()->lpi_enabled)
3540 return 0;
3541
3542 /*
3543 * From that point on, we only try to do some damage control.
3544 */
3545 pr_warn("GICv3: CPU%d: Booted with LPIs enabled, memory probably corrupted\n",
Shanker Donthineni6eb486b2018-03-21 20:58:49 -05003546 smp_processor_id());
3547 add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
3548
3549 /* Disable LPIs */
3550 val &= ~GICR_CTLR_ENABLE_LPIS;
3551 writel_relaxed(val, rbase + GICR_CTLR);
3552
3553 /* Make sure any change to GICR_CTLR is observable by the GIC */
3554 dsb(sy);
3555
3556 /*
3557 * Software must observe RWP==0 after clearing GICR_CTLR.EnableLPIs
3558 * from 1 to 0 before programming GICR_PEND{PROP}BASER registers.
3559 * Error out if we time out waiting for RWP to clear.
3560 */
3561 while (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_RWP) {
3562 if (!timeout) {
3563 pr_err("CPU%d: Timeout while disabling LPIs\n",
3564 smp_processor_id());
3565 return -ETIMEDOUT;
3566 }
3567 udelay(1);
3568 timeout--;
3569 }
3570
3571 /*
3572 * After it has been written to 1, it is IMPLEMENTATION
3573 * DEFINED whether GICR_CTLR.EnableLPI becomes RES1 or can be
3574 * cleared to 0. Error out if clearing the bit failed.
3575 */
3576 if (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS) {
3577 pr_err("CPU%d: Failed to disable LPIs\n", smp_processor_id());
3578 return -EBUSY;
3579 }
3580
3581 return 0;
3582}
3583
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003584int its_cpu_init(void)
3585{
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003586 if (!list_empty(&its_nodes)) {
Shanker Donthineni6eb486b2018-03-21 20:58:49 -05003587 int ret;
3588
3589 ret = redist_disable_lpis();
3590 if (ret)
3591 return ret;
3592
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003593 its_cpu_init_lpis();
Derek Basehore920181c2018-02-28 21:48:20 -08003594 its_cpu_init_collections();
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003595 }
3596
3597 return 0;
3598}
3599
Arvind Yadav935bba72017-06-22 16:05:30 +05303600static const struct of_device_id its_device_id[] = {
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003601 { .compatible = "arm,gic-v3-its", },
3602 {},
3603};
3604
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003605static int __init its_of_probe(struct device_node *node)
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003606{
3607 struct device_node *np;
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003608 struct resource res;
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003609
3610 for (np = of_find_matching_node(node, its_device_id); np;
3611 np = of_find_matching_node(np, its_device_id)) {
Stephen Boyd95a25622018-02-01 09:03:29 -08003612 if (!of_device_is_available(np))
3613 continue;
Tomasz Nowickid14ae5e2016-09-12 20:32:23 +02003614 if (!of_property_read_bool(np, "msi-controller")) {
Rob Herringe81f54c2017-07-18 16:43:10 -05003615 pr_warn("%pOF: no msi-controller property, ITS ignored\n",
3616 np);
Tomasz Nowickid14ae5e2016-09-12 20:32:23 +02003617 continue;
3618 }
3619
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003620 if (of_address_to_resource(np, 0, &res)) {
Rob Herringe81f54c2017-07-18 16:43:10 -05003621 pr_warn("%pOF: no regs?\n", np);
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003622 continue;
3623 }
3624
3625 its_probe_one(&res, &np->fwnode, of_node_to_nid(np));
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003626 }
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003627 return 0;
3628}
3629
Tomasz Nowicki3f010cf2016-09-12 20:32:25 +02003630#ifdef CONFIG_ACPI
3631
3632#define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K)
3633
Robert Richterd1ce2632017-07-12 15:25:09 +02003634#ifdef CONFIG_ACPI_NUMA
Ganapatrao Kulkarnidbd2b822017-06-22 11:40:12 +05303635struct its_srat_map {
3636 /* numa node id */
3637 u32 numa_node;
3638 /* GIC ITS ID */
3639 u32 its_id;
3640};
3641
Hanjun Guofdf6e7a2017-07-26 18:15:49 +08003642static struct its_srat_map *its_srat_maps __initdata;
Ganapatrao Kulkarnidbd2b822017-06-22 11:40:12 +05303643static int its_in_srat __initdata;
3644
3645static int __init acpi_get_its_numa_node(u32 its_id)
3646{
3647 int i;
3648
3649 for (i = 0; i < its_in_srat; i++) {
3650 if (its_id == its_srat_maps[i].its_id)
3651 return its_srat_maps[i].numa_node;
3652 }
3653 return NUMA_NO_NODE;
3654}
3655
Hanjun Guofdf6e7a2017-07-26 18:15:49 +08003656static int __init gic_acpi_match_srat_its(struct acpi_subtable_header *header,
3657 const unsigned long end)
3658{
3659 return 0;
3660}
3661
Ganapatrao Kulkarnidbd2b822017-06-22 11:40:12 +05303662static int __init gic_acpi_parse_srat_its(struct acpi_subtable_header *header,
3663 const unsigned long end)
3664{
3665 int node;
3666 struct acpi_srat_gic_its_affinity *its_affinity;
3667
3668 its_affinity = (struct acpi_srat_gic_its_affinity *)header;
3669 if (!its_affinity)
3670 return -EINVAL;
3671
3672 if (its_affinity->header.length < sizeof(*its_affinity)) {
3673 pr_err("SRAT: Invalid header length %d in ITS affinity\n",
3674 its_affinity->header.length);
3675 return -EINVAL;
3676 }
3677
Ganapatrao Kulkarnidbd2b822017-06-22 11:40:12 +05303678 node = acpi_map_pxm_to_node(its_affinity->proximity_domain);
3679
3680 if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
3681 pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node);
3682 return 0;
3683 }
3684
3685 its_srat_maps[its_in_srat].numa_node = node;
3686 its_srat_maps[its_in_srat].its_id = its_affinity->its_id;
3687 its_in_srat++;
3688 pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n",
3689 its_affinity->proximity_domain, its_affinity->its_id, node);
3690
3691 return 0;
3692}
3693
3694static void __init acpi_table_parse_srat_its(void)
3695{
Hanjun Guofdf6e7a2017-07-26 18:15:49 +08003696 int count;
3697
3698 count = acpi_table_parse_entries(ACPI_SIG_SRAT,
3699 sizeof(struct acpi_table_srat),
3700 ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
3701 gic_acpi_match_srat_its, 0);
3702 if (count <= 0)
3703 return;
3704
Kees Cook6da2ec52018-06-12 13:55:00 -07003705 its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map),
3706 GFP_KERNEL);
Hanjun Guofdf6e7a2017-07-26 18:15:49 +08003707 if (!its_srat_maps) {
3708 pr_warn("SRAT: Failed to allocate memory for its_srat_maps!\n");
3709 return;
3710 }
3711
Ganapatrao Kulkarnidbd2b822017-06-22 11:40:12 +05303712 acpi_table_parse_entries(ACPI_SIG_SRAT,
3713 sizeof(struct acpi_table_srat),
3714 ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
3715 gic_acpi_parse_srat_its, 0);
3716}
Hanjun Guofdf6e7a2017-07-26 18:15:49 +08003717
3718/* free the its_srat_maps after ITS probing */
3719static void __init acpi_its_srat_maps_free(void)
3720{
3721 kfree(its_srat_maps);
3722}
Ganapatrao Kulkarnidbd2b822017-06-22 11:40:12 +05303723#else
3724static void __init acpi_table_parse_srat_its(void) { }
3725static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; }
Hanjun Guofdf6e7a2017-07-26 18:15:49 +08003726static void __init acpi_its_srat_maps_free(void) { }
Ganapatrao Kulkarnidbd2b822017-06-22 11:40:12 +05303727#endif
3728
Tomasz Nowicki3f010cf2016-09-12 20:32:25 +02003729static int __init gic_acpi_parse_madt_its(struct acpi_subtable_header *header,
3730 const unsigned long end)
3731{
3732 struct acpi_madt_generic_translator *its_entry;
3733 struct fwnode_handle *dom_handle;
3734 struct resource res;
3735 int err;
3736
3737 its_entry = (struct acpi_madt_generic_translator *)header;
3738 memset(&res, 0, sizeof(res));
3739 res.start = its_entry->base_address;
3740 res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1;
3741 res.flags = IORESOURCE_MEM;
3742
3743 dom_handle = irq_domain_alloc_fwnode((void *)its_entry->base_address);
3744 if (!dom_handle) {
3745 pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n",
3746 &res.start);
3747 return -ENOMEM;
3748 }
3749
Shameer Kolothum8b4282e2018-02-13 15:20:50 +00003750 err = iort_register_domain_token(its_entry->translation_id, res.start,
3751 dom_handle);
Tomasz Nowicki3f010cf2016-09-12 20:32:25 +02003752 if (err) {
3753 pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n",
3754 &res.start, its_entry->translation_id);
3755 goto dom_err;
3756 }
3757
Ganapatrao Kulkarnidbd2b822017-06-22 11:40:12 +05303758 err = its_probe_one(&res, dom_handle,
3759 acpi_get_its_numa_node(its_entry->translation_id));
Tomasz Nowicki3f010cf2016-09-12 20:32:25 +02003760 if (!err)
3761 return 0;
3762
3763 iort_deregister_domain_token(its_entry->translation_id);
3764dom_err:
3765 irq_domain_free_fwnode(dom_handle);
3766 return err;
3767}
3768
3769static void __init its_acpi_probe(void)
3770{
Ganapatrao Kulkarnidbd2b822017-06-22 11:40:12 +05303771 acpi_table_parse_srat_its();
Tomasz Nowicki3f010cf2016-09-12 20:32:25 +02003772 acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
3773 gic_acpi_parse_madt_its, 0);
Hanjun Guofdf6e7a2017-07-26 18:15:49 +08003774 acpi_its_srat_maps_free();
Tomasz Nowicki3f010cf2016-09-12 20:32:25 +02003775}
3776#else
3777static void __init its_acpi_probe(void) { }
3778#endif
3779
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003780int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
3781 struct irq_domain *parent_domain)
3782{
3783 struct device_node *of_node;
Marc Zyngier8fff27a2016-12-20 13:41:55 +00003784 struct its_node *its;
3785 bool has_v4 = false;
3786 int err;
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003787
3788 its_parent = parent_domain;
3789 of_node = to_of_node(handle);
3790 if (of_node)
3791 its_of_probe(of_node);
3792 else
Tomasz Nowicki3f010cf2016-09-12 20:32:25 +02003793 its_acpi_probe();
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003794
3795 if (list_empty(&its_nodes)) {
3796 pr_warn("ITS: No ITS available, not enabling LPIs\n");
3797 return -ENXIO;
3798 }
3799
3800 gic_rdists = rdists;
Marc Zyngier11e37d32018-07-27 13:38:54 +01003801
3802 err = allocate_lpi_tables();
Marc Zyngier8fff27a2016-12-20 13:41:55 +00003803 if (err)
3804 return err;
3805
3806 list_for_each_entry(its, &its_nodes, entry)
3807 has_v4 |= its->is_v4;
3808
3809 if (has_v4 & rdists->has_vlpis) {
Marc Zyngier3d63cb52016-12-20 15:31:54 +00003810 if (its_init_vpe_domain() ||
3811 its_init_v4(parent_domain, &its_vpe_domain_ops)) {
Marc Zyngier8fff27a2016-12-20 13:41:55 +00003812 rdists->has_vlpis = false;
3813 pr_err("ITS: Disabling GICv4 support\n");
3814 }
3815 }
3816
Derek Basehoredba0bc72018-02-28 21:48:18 -08003817 register_syscore_ops(&its_syscore_ops);
3818
Marc Zyngier8fff27a2016-12-20 13:41:55 +00003819 return 0;
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003820}