Marc Zyngiercc2d3212014-11-24 14:35:11 +00001/*
Marc Zyngierd7276b82016-12-20 15:11:47 +00002 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
Marc Zyngiercc2d3212014-11-24 14:35:11 +00003 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
Tomasz Nowicki3f010cf2016-09-12 20:32:25 +020018#include <linux/acpi.h>
Hanjun Guo8d3554b2017-03-07 20:39:59 +080019#include <linux/acpi_iort.h>
Marc Zyngiercc2d3212014-11-24 14:35:11 +000020#include <linux/bitmap.h>
21#include <linux/cpu.h>
22#include <linux/delay.h>
Robin Murphy44bb7e22016-09-12 17:13:59 +010023#include <linux/dma-iommu.h>
Marc Zyngiercc2d3212014-11-24 14:35:11 +000024#include <linux/interrupt.h>
Tomasz Nowicki3f010cf2016-09-12 20:32:25 +020025#include <linux/irqdomain.h>
Marc Zyngiercc2d3212014-11-24 14:35:11 +000026#include <linux/log2.h>
27#include <linux/mm.h>
28#include <linux/msi.h>
29#include <linux/of.h>
30#include <linux/of_address.h>
31#include <linux/of_irq.h>
32#include <linux/of_pci.h>
33#include <linux/of_platform.h>
34#include <linux/percpu.h>
35#include <linux/slab.h>
36
Joel Porquet41a83e062015-07-07 17:11:46 -040037#include <linux/irqchip.h>
Marc Zyngiercc2d3212014-11-24 14:35:11 +000038#include <linux/irqchip/arm-gic-v3.h>
Marc Zyngierc808eea2016-12-20 09:31:20 +000039#include <linux/irqchip/arm-gic-v4.h>
Marc Zyngiercc2d3212014-11-24 14:35:11 +000040
Marc Zyngiercc2d3212014-11-24 14:35:11 +000041#include <asm/cputype.h>
42#include <asm/exception.h>
43
Robert Richter67510cc2015-09-21 22:58:37 +020044#include "irq-gic-common.h"
45
Robert Richter94100972015-09-21 22:58:38 +020046#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0)
47#define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1)
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +020048#define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2)
Marc Zyngiercc2d3212014-11-24 14:35:11 +000049
Marc Zyngierc48ed512014-11-24 14:35:12 +000050#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0)
51
Marc Zyngiera13b0402016-12-19 17:15:24 +000052static u32 lpi_id_bits;
53
54/*
55 * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
56 * deal with (one configuration byte per interrupt). PENDBASE has to
57 * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
58 */
59#define LPI_NRBITS lpi_id_bits
60#define LPI_PROPBASE_SZ ALIGN(BIT(LPI_NRBITS), SZ_64K)
61#define LPI_PENDBASE_SZ ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
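/*
 * Illustrative sizing (not from the original source): assuming
 * lpi_id_bits == 16 (the ITS_MAX_LPI_NRBITS cap used later in this
 * file), LPI_PROPBASE_SZ = ALIGN(BIT(16), SZ_64K) = 64kB, i.e. one
 * configuration byte for each of the 65536 possible interrupt IDs,
 * and LPI_PENDBASE_SZ = ALIGN(BIT(16) / 8, SZ_64K) = 64kB, i.e. 8kB
 * of pending bits rounded up to the required 64kB alignment.
 */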
62
63#define LPI_PROP_DEFAULT_PRIO 0xa0
64
Marc Zyngiercc2d3212014-11-24 14:35:11 +000065/*
66 * Collection structure - just an ID, and a redistributor address to
67 * ping. We use one per CPU as a bag of interrupts assigned to this
68 * CPU.
69 */
70struct its_collection {
71 u64 target_address;
72 u16 col_id;
73};
74
75/*
Shanker Donthineni93473592016-06-06 18:17:30 -050076 * The ITS_BASER structure - contains memory information, cached
77 * value of BASER register configuration and ITS page size.
Shanker Donthineni466b7d12016-03-09 22:10:49 -060078 */
79struct its_baser {
80 void *base;
81 u64 val;
82 u32 order;
Shanker Donthineni93473592016-06-06 18:17:30 -050083 u32 psz;
Shanker Donthineni466b7d12016-03-09 22:10:49 -060084};
85
Ard Biesheuvel558b0162017-10-17 17:55:56 +010086struct its_device;
87
Shanker Donthineni466b7d12016-03-09 22:10:49 -060088/*
Marc Zyngiercc2d3212014-11-24 14:35:11 +000089 * The ITS structure - contains most of the infrastructure, with the
Marc Zyngier841514a2015-07-28 14:46:20 +010090 * top-level MSI domain, the command queue, the collections, and the
91 * list of devices writing to it.
Marc Zyngiercc2d3212014-11-24 14:35:11 +000092 */
93struct its_node {
94 raw_spinlock_t lock;
95 struct list_head entry;
Marc Zyngiercc2d3212014-11-24 14:35:11 +000096 void __iomem *base;
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +020097 phys_addr_t phys_base;
Marc Zyngiercc2d3212014-11-24 14:35:11 +000098 struct its_cmd_block *cmd_base;
99 struct its_cmd_block *cmd_write;
Shanker Donthineni466b7d12016-03-09 22:10:49 -0600100 struct its_baser tables[GITS_BASER_NR_REGS];
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000101 struct its_collection *collections;
Ard Biesheuvel558b0162017-10-17 17:55:56 +0100102 struct fwnode_handle *fwnode_handle;
103 u64 (*get_msi_base)(struct its_device *its_dev);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000104 struct list_head its_device_list;
105 u64 flags;
Marc Zyngierdebf6d02017-10-08 18:44:42 +0100106 unsigned long list_nr;
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000107 u32 ite_size;
Shanker Donthineni466b7d12016-03-09 22:10:49 -0600108 u32 device_ids;
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +0200109 int numa_node;
Ard Biesheuvel558b0162017-10-17 17:55:56 +0100110 unsigned int msi_domain_flags;
111 u32 pre_its_base; /* for Socionext Synquacer */
Marc Zyngier3dfa5762016-12-19 17:25:54 +0000112 bool is_v4;
Marc Zyngier5c9a8822017-07-28 21:20:37 +0100113 int vlpi_redist_offset;
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000114};
115
116#define ITS_ITT_ALIGN SZ_256
117
Shanker Donthineni2eca0d62016-02-16 18:00:36 -0600118/* Convert page order to size in bytes */
119#define PAGE_ORDER_TO_SIZE(o) (PAGE_SIZE << (o))
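/*
 * Example (assuming 4kB pages): PAGE_ORDER_TO_SIZE(0) = 4kB and
 * PAGE_ORDER_TO_SIZE(4) = 64kB, i.e. the inverse of get_order().
 */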
120
Marc Zyngier591e5be2015-07-17 10:46:42 +0100121struct event_lpi_map {
122 unsigned long *lpi_map;
123 u16 *col_map;
124 irq_hw_number_t lpi_base;
125 int nr_lpis;
Marc Zyngierd011e4e2016-12-20 09:44:41 +0000126 struct mutex vlpi_lock;
127 struct its_vm *vm;
128 struct its_vlpi_map *vlpi_maps;
129 int nr_vlpis;
Marc Zyngier591e5be2015-07-17 10:46:42 +0100130};
131
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000132/*
Marc Zyngierd011e4e2016-12-20 09:44:41 +0000133 * The ITS view of a device - belongs to an ITS, owns an interrupt
134 * translation table, and a list of interrupts. If some of its
135 * LPIs are injected into a guest (GICv4), the event_map.vm field
136 * indicates which one.
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000137 */
138struct its_device {
139 struct list_head entry;
140 struct its_node *its;
Marc Zyngier591e5be2015-07-17 10:46:42 +0100141 struct event_lpi_map event_map;
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000142 void *itt;
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000143 u32 nr_ites;
144 u32 device_id;
145};
146
Marc Zyngier20b3d542016-12-20 15:23:22 +0000147static struct {
148 raw_spinlock_t lock;
149 struct its_device *dev;
150 struct its_vpe **vpes;
151 int next_victim;
152} vpe_proxy;
153
Marc Zyngier1ac19ca2014-11-24 14:35:14 +0000154static LIST_HEAD(its_nodes);
155static DEFINE_SPINLOCK(its_lock);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +0000156static struct rdists *gic_rdists;
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +0200157static struct irq_domain *its_parent;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +0000158
Marc Zyngier3dfa5762016-12-19 17:25:54 +0000159static unsigned long its_list_map;
Marc Zyngier3171a472016-12-20 15:17:28 +0000160static u16 vmovp_seq_num;
161static DEFINE_RAW_SPINLOCK(vmovp_lock);
162
Marc Zyngier7d75bbb2016-12-20 13:55:54 +0000163static DEFINE_IDA(its_vpeid_ida);
Marc Zyngier3dfa5762016-12-19 17:25:54 +0000164
Marc Zyngier1ac19ca2014-11-24 14:35:14 +0000165#define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist))
166#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
Marc Zyngiere643d802016-12-20 15:09:31 +0000167#define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K)
Marc Zyngier1ac19ca2014-11-24 14:35:14 +0000168
Marc Zyngier591e5be2015-07-17 10:46:42 +0100169static struct its_collection *dev_event_to_col(struct its_device *its_dev,
170 u32 event)
171{
172 struct its_node *its = its_dev->its;
173
174 return its->collections + its_dev->event_map.col_map[event];
175}
176
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000177/*
178 * ITS command descriptors - parameters to be encoded in a command
179 * block.
180 */
181struct its_cmd_desc {
182 union {
183 struct {
184 struct its_device *dev;
185 u32 event_id;
186 } its_inv_cmd;
187
188 struct {
189 struct its_device *dev;
190 u32 event_id;
Marc Zyngier8d85dce2016-12-19 18:02:13 +0000191 } its_clear_cmd;
192
193 struct {
194 struct its_device *dev;
195 u32 event_id;
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000196 } its_int_cmd;
197
198 struct {
199 struct its_device *dev;
200 int valid;
201 } its_mapd_cmd;
202
203 struct {
204 struct its_collection *col;
205 int valid;
206 } its_mapc_cmd;
207
208 struct {
209 struct its_device *dev;
210 u32 phys_id;
211 u32 event_id;
Marc Zyngier6a25ad32016-12-20 15:52:26 +0000212 } its_mapti_cmd;
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000213
214 struct {
215 struct its_device *dev;
216 struct its_collection *col;
Marc Zyngier591e5be2015-07-17 10:46:42 +0100217 u32 event_id;
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000218 } its_movi_cmd;
219
220 struct {
221 struct its_device *dev;
222 u32 event_id;
223 } its_discard_cmd;
224
225 struct {
226 struct its_collection *col;
227 } its_invall_cmd;
Marc Zyngierd011e4e2016-12-20 09:44:41 +0000228
229 struct {
230 struct its_vpe *vpe;
Marc Zyngiereb781922016-12-20 14:47:05 +0000231 } its_vinvall_cmd;
232
233 struct {
234 struct its_vpe *vpe;
235 struct its_collection *col;
236 bool valid;
237 } its_vmapp_cmd;
238
239 struct {
240 struct its_vpe *vpe;
Marc Zyngierd011e4e2016-12-20 09:44:41 +0000241 struct its_device *dev;
242 u32 virt_id;
243 u32 event_id;
244 bool db_enabled;
245 } its_vmapti_cmd;
246
247 struct {
248 struct its_vpe *vpe;
249 struct its_device *dev;
250 u32 event_id;
251 bool db_enabled;
252 } its_vmovi_cmd;
Marc Zyngier3171a472016-12-20 15:17:28 +0000253
254 struct {
255 struct its_vpe *vpe;
256 struct its_collection *col;
257 u16 seq_num;
258 u16 its_list;
259 } its_vmovp_cmd;
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000260 };
261};
262
263/*
264 * The ITS command block, which is what the ITS actually parses.
265 */
266struct its_cmd_block {
267 u64 raw_cmd[4];
268};
269
270#define ITS_CMD_QUEUE_SZ SZ_64K
271#define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))
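/*
 * Rough arithmetic for reference: a command block is four u64s
 * (32 bytes), so the 64kB queue holds 64k / 32 = 2048 commands.
 */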
272
Marc Zyngier67047f902017-07-28 21:16:58 +0100273typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *,
274 struct its_cmd_block *,
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000275 struct its_cmd_desc *);
276
Marc Zyngier67047f902017-07-28 21:16:58 +0100277typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *,
278 struct its_cmd_block *,
Marc Zyngierd011e4e2016-12-20 09:44:41 +0000279 struct its_cmd_desc *);
280
Marc Zyngier4d36f132016-12-19 17:11:52 +0000281static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
282{
283 u64 mask = GENMASK_ULL(h, l);
284 *raw_cmd &= ~mask;
285 *raw_cmd |= (val << l) & mask;
286}
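/*
 * Illustrative use: the its_encode_*() helpers below all funnel
 * through its_mask_encode(). For instance, encoding DeviceID 0x42
 * as done by its_encode_devid() amounts to
 *
 *	its_mask_encode(&cmd->raw_cmd[0], 0x42, 63, 32);
 *
 * which clears bits [63:32] of the first command doubleword and ORs
 * in (0x42 << 32), leaving the low word (the command opcode) alone.
 */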
287
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000288static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
289{
Marc Zyngier4d36f132016-12-19 17:11:52 +0000290 its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000291}
292
293static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
294{
Marc Zyngier4d36f132016-12-19 17:11:52 +0000295 its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000296}
297
298static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
299{
Marc Zyngier4d36f132016-12-19 17:11:52 +0000300 its_mask_encode(&cmd->raw_cmd[1], id, 31, 0);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000301}
302
303static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
304{
Marc Zyngier4d36f132016-12-19 17:11:52 +0000305 its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000306}
307
308static void its_encode_size(struct its_cmd_block *cmd, u8 size)
309{
Marc Zyngier4d36f132016-12-19 17:11:52 +0000310 its_mask_encode(&cmd->raw_cmd[1], size, 4, 0);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000311}
312
313static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
314{
Marc Zyngier4d36f132016-12-19 17:11:52 +0000315 its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 50, 8);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000316}
317
318static void its_encode_valid(struct its_cmd_block *cmd, int valid)
319{
Marc Zyngier4d36f132016-12-19 17:11:52 +0000320 its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000321}
322
323static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
324{
Marc Zyngier4d36f132016-12-19 17:11:52 +0000325 its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 50, 16);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000326}
327
328static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
329{
Marc Zyngier4d36f132016-12-19 17:11:52 +0000330 its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000331}
332
Marc Zyngierd011e4e2016-12-20 09:44:41 +0000333static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
334{
335 its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
336}
337
338static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
339{
340 its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
341}
342
343static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
344{
345 its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
346}
347
348static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
349{
350 its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
351}
352
Marc Zyngier3171a472016-12-20 15:17:28 +0000353static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num)
354{
355 its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32);
356}
357
358static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
359{
360 its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0);
361}
362
Marc Zyngiereb781922016-12-20 14:47:05 +0000363static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
364{
365 its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 50, 16);
366}
367
368static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
369{
370 its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
371}
372
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000373static inline void its_fixup_cmd(struct its_cmd_block *cmd)
374{
375 /* Let's fixup BE commands */
376 cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]);
377 cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]);
378 cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]);
379 cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]);
380}
381
Marc Zyngier67047f902017-07-28 21:16:58 +0100382static struct its_collection *its_build_mapd_cmd(struct its_node *its,
383 struct its_cmd_block *cmd,
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000384 struct its_cmd_desc *desc)
385{
386 unsigned long itt_addr;
Marc Zyngierc8481262014-12-12 10:51:24 +0000387 u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000388
389 itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
390 itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);
391
392 its_encode_cmd(cmd, GITS_CMD_MAPD);
393 its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
394 its_encode_size(cmd, size - 1);
395 its_encode_itt(cmd, itt_addr);
396 its_encode_valid(cmd, desc->its_mapd_cmd.valid);
397
398 its_fixup_cmd(cmd);
399
Marc Zyngier591e5be2015-07-17 10:46:42 +0100400 return NULL;
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000401}
402
Marc Zyngier67047f902017-07-28 21:16:58 +0100403static struct its_collection *its_build_mapc_cmd(struct its_node *its,
404 struct its_cmd_block *cmd,
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000405 struct its_cmd_desc *desc)
406{
407 its_encode_cmd(cmd, GITS_CMD_MAPC);
408 its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
409 its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
410 its_encode_valid(cmd, desc->its_mapc_cmd.valid);
411
412 its_fixup_cmd(cmd);
413
414 return desc->its_mapc_cmd.col;
415}
416
Marc Zyngier67047f902017-07-28 21:16:58 +0100417static struct its_collection *its_build_mapti_cmd(struct its_node *its,
418 struct its_cmd_block *cmd,
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000419 struct its_cmd_desc *desc)
420{
Marc Zyngier591e5be2015-07-17 10:46:42 +0100421 struct its_collection *col;
422
Marc Zyngier6a25ad32016-12-20 15:52:26 +0000423 col = dev_event_to_col(desc->its_mapti_cmd.dev,
424 desc->its_mapti_cmd.event_id);
Marc Zyngier591e5be2015-07-17 10:46:42 +0100425
Marc Zyngier6a25ad32016-12-20 15:52:26 +0000426 its_encode_cmd(cmd, GITS_CMD_MAPTI);
427 its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id);
428 its_encode_event_id(cmd, desc->its_mapti_cmd.event_id);
429 its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id);
Marc Zyngier591e5be2015-07-17 10:46:42 +0100430 its_encode_collection(cmd, col->col_id);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000431
432 its_fixup_cmd(cmd);
433
Marc Zyngier591e5be2015-07-17 10:46:42 +0100434 return col;
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000435}
436
Marc Zyngier67047f902017-07-28 21:16:58 +0100437static struct its_collection *its_build_movi_cmd(struct its_node *its,
438 struct its_cmd_block *cmd,
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000439 struct its_cmd_desc *desc)
440{
Marc Zyngier591e5be2015-07-17 10:46:42 +0100441 struct its_collection *col;
442
443 col = dev_event_to_col(desc->its_movi_cmd.dev,
444 desc->its_movi_cmd.event_id);
445
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000446 its_encode_cmd(cmd, GITS_CMD_MOVI);
447 its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
Marc Zyngier591e5be2015-07-17 10:46:42 +0100448 its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000449 its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);
450
451 its_fixup_cmd(cmd);
452
Marc Zyngier591e5be2015-07-17 10:46:42 +0100453 return col;
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000454}
455
Marc Zyngier67047f902017-07-28 21:16:58 +0100456static struct its_collection *its_build_discard_cmd(struct its_node *its,
457 struct its_cmd_block *cmd,
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000458 struct its_cmd_desc *desc)
459{
Marc Zyngier591e5be2015-07-17 10:46:42 +0100460 struct its_collection *col;
461
462 col = dev_event_to_col(desc->its_discard_cmd.dev,
463 desc->its_discard_cmd.event_id);
464
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000465 its_encode_cmd(cmd, GITS_CMD_DISCARD);
466 its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
467 its_encode_event_id(cmd, desc->its_discard_cmd.event_id);
468
469 its_fixup_cmd(cmd);
470
Marc Zyngier591e5be2015-07-17 10:46:42 +0100471 return col;
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000472}
473
Marc Zyngier67047f902017-07-28 21:16:58 +0100474static struct its_collection *its_build_inv_cmd(struct its_node *its,
475 struct its_cmd_block *cmd,
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000476 struct its_cmd_desc *desc)
477{
Marc Zyngier591e5be2015-07-17 10:46:42 +0100478 struct its_collection *col;
479
480 col = dev_event_to_col(desc->its_inv_cmd.dev,
481 desc->its_inv_cmd.event_id);
482
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000483 its_encode_cmd(cmd, GITS_CMD_INV);
484 its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
485 its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
486
487 its_fixup_cmd(cmd);
488
Marc Zyngier591e5be2015-07-17 10:46:42 +0100489 return col;
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000490}
491
Marc Zyngier67047f902017-07-28 21:16:58 +0100492static struct its_collection *its_build_int_cmd(struct its_node *its,
493 struct its_cmd_block *cmd,
Marc Zyngier8d85dce2016-12-19 18:02:13 +0000494 struct its_cmd_desc *desc)
495{
496 struct its_collection *col;
497
498 col = dev_event_to_col(desc->its_int_cmd.dev,
499 desc->its_int_cmd.event_id);
500
501 its_encode_cmd(cmd, GITS_CMD_INT);
502 its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
503 its_encode_event_id(cmd, desc->its_int_cmd.event_id);
504
505 its_fixup_cmd(cmd);
506
507 return col;
508}
509
Marc Zyngier67047f902017-07-28 21:16:58 +0100510static struct its_collection *its_build_clear_cmd(struct its_node *its,
511 struct its_cmd_block *cmd,
Marc Zyngier8d85dce2016-12-19 18:02:13 +0000512 struct its_cmd_desc *desc)
513{
514 struct its_collection *col;
515
516 col = dev_event_to_col(desc->its_clear_cmd.dev,
517 desc->its_clear_cmd.event_id);
518
519 its_encode_cmd(cmd, GITS_CMD_CLEAR);
520 its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
521 its_encode_event_id(cmd, desc->its_clear_cmd.event_id);
522
523 its_fixup_cmd(cmd);
524
525 return col;
526}
527
Marc Zyngier67047f902017-07-28 21:16:58 +0100528static struct its_collection *its_build_invall_cmd(struct its_node *its,
529 struct its_cmd_block *cmd,
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000530 struct its_cmd_desc *desc)
531{
532 its_encode_cmd(cmd, GITS_CMD_INVALL);
533 its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
534
535 its_fixup_cmd(cmd);
536
537 return NULL;
538}
539
Marc Zyngier67047f902017-07-28 21:16:58 +0100540static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
541 struct its_cmd_block *cmd,
Marc Zyngiereb781922016-12-20 14:47:05 +0000542 struct its_cmd_desc *desc)
543{
544 its_encode_cmd(cmd, GITS_CMD_VINVALL);
545 its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);
546
547 its_fixup_cmd(cmd);
548
549 return desc->its_vinvall_cmd.vpe;
550}
551
Marc Zyngier67047f902017-07-28 21:16:58 +0100552static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
553 struct its_cmd_block *cmd,
Marc Zyngiereb781922016-12-20 14:47:05 +0000554 struct its_cmd_desc *desc)
555{
556 unsigned long vpt_addr;
Marc Zyngier5c9a8822017-07-28 21:20:37 +0100557 u64 target;
Marc Zyngiereb781922016-12-20 14:47:05 +0000558
559 vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
Marc Zyngier5c9a8822017-07-28 21:20:37 +0100560 target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;
Marc Zyngiereb781922016-12-20 14:47:05 +0000561
562 its_encode_cmd(cmd, GITS_CMD_VMAPP);
563 its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
564 its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
Marc Zyngier5c9a8822017-07-28 21:20:37 +0100565 its_encode_target(cmd, target);
Marc Zyngiereb781922016-12-20 14:47:05 +0000566 its_encode_vpt_addr(cmd, vpt_addr);
567 its_encode_vpt_size(cmd, LPI_NRBITS - 1);
568
569 its_fixup_cmd(cmd);
570
571 return desc->its_vmapp_cmd.vpe;
572}
573
Marc Zyngier67047f902017-07-28 21:16:58 +0100574static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
575 struct its_cmd_block *cmd,
Marc Zyngierd011e4e2016-12-20 09:44:41 +0000576 struct its_cmd_desc *desc)
577{
578 u32 db;
579
580 if (desc->its_vmapti_cmd.db_enabled)
581 db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
582 else
583 db = 1023;
584
585 its_encode_cmd(cmd, GITS_CMD_VMAPTI);
586 its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
587 its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
588 its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
589 its_encode_db_phys_id(cmd, db);
590 its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);
591
592 its_fixup_cmd(cmd);
593
594 return desc->its_vmapti_cmd.vpe;
595}
596
Marc Zyngier67047f902017-07-28 21:16:58 +0100597static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
598 struct its_cmd_block *cmd,
Marc Zyngierd011e4e2016-12-20 09:44:41 +0000599 struct its_cmd_desc *desc)
600{
601 u32 db;
602
603 if (desc->its_vmovi_cmd.db_enabled)
604 db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
605 else
606 db = 1023;
607
608 its_encode_cmd(cmd, GITS_CMD_VMOVI);
609 its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
610 its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
611 its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
612 its_encode_db_phys_id(cmd, db);
613 its_encode_db_valid(cmd, true);
614
615 its_fixup_cmd(cmd);
616
617 return desc->its_vmovi_cmd.vpe;
618}
619
Marc Zyngier67047f902017-07-28 21:16:58 +0100620static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
621 struct its_cmd_block *cmd,
Marc Zyngier3171a472016-12-20 15:17:28 +0000622 struct its_cmd_desc *desc)
623{
Marc Zyngier5c9a8822017-07-28 21:20:37 +0100624 u64 target;
625
626 target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset;
Marc Zyngier3171a472016-12-20 15:17:28 +0000627 its_encode_cmd(cmd, GITS_CMD_VMOVP);
628 its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
629 its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
630 its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
Marc Zyngier5c9a8822017-07-28 21:20:37 +0100631 its_encode_target(cmd, target);
Marc Zyngier3171a472016-12-20 15:17:28 +0000632
633 its_fixup_cmd(cmd);
634
635 return desc->its_vmovp_cmd.vpe;
636}
637
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000638static u64 its_cmd_ptr_to_offset(struct its_node *its,
639 struct its_cmd_block *ptr)
640{
641 return (ptr - its->cmd_base) * sizeof(*ptr);
642}
643
644static int its_queue_full(struct its_node *its)
645{
646 int widx;
647 int ridx;
648
649 widx = its->cmd_write - its->cmd_base;
650 ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);
651
652 /* This is incredibly unlikely to happen, unless the ITS locks up. */
653 if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
654 return 1;
655
656 return 0;
657}
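/*
 * Worked example: with the 2048-entry queue, if cmd_write sits at
 * slot 2047 while GITS_CREADR still reports slot 0, then
 * (2047 + 1) % 2048 == 0 == ridx and the queue is declared full,
 * keeping one slot free so CWRITER never catches up with CREADR.
 */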
658
659static struct its_cmd_block *its_allocate_entry(struct its_node *its)
660{
661 struct its_cmd_block *cmd;
662 u32 count = 1000000; /* 1s! */
663
664 while (its_queue_full(its)) {
665 count--;
666 if (!count) {
667 pr_err_ratelimited("ITS queue not draining\n");
668 return NULL;
669 }
670 cpu_relax();
671 udelay(1);
672 }
673
674 cmd = its->cmd_write++;
675
676 /* Handle queue wrapping */
677 if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
678 its->cmd_write = its->cmd_base;
679
Marc Zyngier34d677a2016-12-19 17:16:45 +0000680 /* Clear command */
681 cmd->raw_cmd[0] = 0;
682 cmd->raw_cmd[1] = 0;
683 cmd->raw_cmd[2] = 0;
684 cmd->raw_cmd[3] = 0;
685
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000686 return cmd;
687}
688
689static struct its_cmd_block *its_post_commands(struct its_node *its)
690{
691 u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);
692
693 writel_relaxed(wr, its->base + GITS_CWRITER);
694
695 return its->cmd_write;
696}
697
698static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
699{
700 /*
701 * Make sure the commands written to memory are observable by
702 * the ITS.
703 */
704 if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
Vladimir Murzin328191c2016-11-02 11:54:05 +0000705 gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000706 else
707 dsb(ishst);
708}
709
Marc Zyngiera19b4622017-08-04 17:45:50 +0100710static int its_wait_for_range_completion(struct its_node *its,
711 struct its_cmd_block *from,
712 struct its_cmd_block *to)
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000713{
714 u64 rd_idx, from_idx, to_idx;
715 u32 count = 1000000; /* 1s! */
716
717 from_idx = its_cmd_ptr_to_offset(its, from);
718 to_idx = its_cmd_ptr_to_offset(its, to);
719
720 while (1) {
721 rd_idx = readl_relaxed(its->base + GITS_CREADR);
Marc Zyngier9bdd8b12017-08-19 10:16:02 +0100722
723 /* Direct case */
724 if (from_idx < to_idx && rd_idx >= to_idx)
725 break;
726
727 /* Wrapped case */
728 if (from_idx >= to_idx && rd_idx >= to_idx && rd_idx < from_idx)
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000729 break;
730
731 count--;
732 if (!count) {
Marc Zyngiera19b4622017-08-04 17:45:50 +0100733 pr_err_ratelimited("ITS queue timeout (%llu %llu %llu)\n",
734 from_idx, to_idx, rd_idx);
735 return -1;
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000736 }
737 cpu_relax();
738 udelay(1);
739 }
Marc Zyngiera19b4622017-08-04 17:45:50 +0100740
741 return 0;
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000742}
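/*
 * Illustrative scenario for the two exit conditions above: with a
 * 64kB queue, from_idx = 0xff80 and to_idx = 0x0040 is a wrapped
 * range, so completion is signalled once rd_idx has passed to_idx
 * but has not yet caught back up with from_idx (rd_idx >= 0x0040 &&
 * rd_idx < 0xff80); a non-wrapped range simply waits for
 * rd_idx >= to_idx.
 */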
743
Marc Zyngiere4f90942016-12-19 17:56:32 +0000744/* Warning, macro hell follows */
745#define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn) \
746void name(struct its_node *its, \
747 buildtype builder, \
748 struct its_cmd_desc *desc) \
749{ \
750 struct its_cmd_block *cmd, *sync_cmd, *next_cmd; \
751 synctype *sync_obj; \
752 unsigned long flags; \
753 \
754 raw_spin_lock_irqsave(&its->lock, flags); \
755 \
756 cmd = its_allocate_entry(its); \
757 if (!cmd) { /* We're soooooo screwed... */ \
758 raw_spin_unlock_irqrestore(&its->lock, flags); \
759 return; \
760 } \
Marc Zyngier67047f902017-07-28 21:16:58 +0100761 sync_obj = builder(its, cmd, desc); \
Marc Zyngiere4f90942016-12-19 17:56:32 +0000762 its_flush_cmd(its, cmd); \
763 \
764 if (sync_obj) { \
765 sync_cmd = its_allocate_entry(its); \
766 if (!sync_cmd) \
767 goto post; \
768 \
Marc Zyngier67047f902017-07-28 21:16:58 +0100769 buildfn(its, sync_cmd, sync_obj); \
Marc Zyngiere4f90942016-12-19 17:56:32 +0000770 its_flush_cmd(its, sync_cmd); \
771 } \
772 \
773post: \
774 next_cmd = its_post_commands(its); \
775 raw_spin_unlock_irqrestore(&its->lock, flags); \
776 \
Marc Zyngiera19b4622017-08-04 17:45:50 +0100777 if (its_wait_for_range_completion(its, cmd, next_cmd)) \
778 pr_err_ratelimited("ITS cmd %ps failed\n", builder); \
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000779}
780
Marc Zyngier67047f902017-07-28 21:16:58 +0100781static void its_build_sync_cmd(struct its_node *its,
782 struct its_cmd_block *sync_cmd,
Marc Zyngiere4f90942016-12-19 17:56:32 +0000783 struct its_collection *sync_col)
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000784{
Marc Zyngiere4f90942016-12-19 17:56:32 +0000785 its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
786 its_encode_target(sync_cmd, sync_col->target_address);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000787
Marc Zyngiere4f90942016-12-19 17:56:32 +0000788 its_fixup_cmd(sync_cmd);
789}
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000790
Marc Zyngiere4f90942016-12-19 17:56:32 +0000791static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
792 struct its_collection, its_build_sync_cmd)
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000793
Marc Zyngier67047f902017-07-28 21:16:58 +0100794static void its_build_vsync_cmd(struct its_node *its,
795 struct its_cmd_block *sync_cmd,
Marc Zyngierd011e4e2016-12-20 09:44:41 +0000796 struct its_vpe *sync_vpe)
797{
798 its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
799 its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000800
Marc Zyngierd011e4e2016-12-20 09:44:41 +0000801 its_fixup_cmd(sync_cmd);
802}
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000803
Marc Zyngierd011e4e2016-12-20 09:44:41 +0000804static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
805 struct its_vpe, its_build_vsync_cmd)
806
Marc Zyngier8d85dce2016-12-19 18:02:13 +0000807static void its_send_int(struct its_device *dev, u32 event_id)
808{
809 struct its_cmd_desc desc;
810
811 desc.its_int_cmd.dev = dev;
812 desc.its_int_cmd.event_id = event_id;
813
814 its_send_single_command(dev->its, its_build_int_cmd, &desc);
815}
816
817static void its_send_clear(struct its_device *dev, u32 event_id)
818{
819 struct its_cmd_desc desc;
820
821 desc.its_clear_cmd.dev = dev;
822 desc.its_clear_cmd.event_id = event_id;
823
824 its_send_single_command(dev->its, its_build_clear_cmd, &desc);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000825}
826
827static void its_send_inv(struct its_device *dev, u32 event_id)
828{
829 struct its_cmd_desc desc;
830
831 desc.its_inv_cmd.dev = dev;
832 desc.its_inv_cmd.event_id = event_id;
833
834 its_send_single_command(dev->its, its_build_inv_cmd, &desc);
835}
836
837static void its_send_mapd(struct its_device *dev, int valid)
838{
839 struct its_cmd_desc desc;
840
841 desc.its_mapd_cmd.dev = dev;
842 desc.its_mapd_cmd.valid = !!valid;
843
844 its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
845}
846
847static void its_send_mapc(struct its_node *its, struct its_collection *col,
848 int valid)
849{
850 struct its_cmd_desc desc;
851
852 desc.its_mapc_cmd.col = col;
853 desc.its_mapc_cmd.valid = !!valid;
854
855 its_send_single_command(its, its_build_mapc_cmd, &desc);
856}
857
Marc Zyngier6a25ad32016-12-20 15:52:26 +0000858static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id)
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000859{
860 struct its_cmd_desc desc;
861
Marc Zyngier6a25ad32016-12-20 15:52:26 +0000862 desc.its_mapti_cmd.dev = dev;
863 desc.its_mapti_cmd.phys_id = irq_id;
864 desc.its_mapti_cmd.event_id = id;
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000865
Marc Zyngier6a25ad32016-12-20 15:52:26 +0000866 its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000867}
868
869static void its_send_movi(struct its_device *dev,
870 struct its_collection *col, u32 id)
871{
872 struct its_cmd_desc desc;
873
874 desc.its_movi_cmd.dev = dev;
875 desc.its_movi_cmd.col = col;
Marc Zyngier591e5be2015-07-17 10:46:42 +0100876 desc.its_movi_cmd.event_id = id;
Marc Zyngiercc2d3212014-11-24 14:35:11 +0000877
878 its_send_single_command(dev->its, its_build_movi_cmd, &desc);
879}
880
881static void its_send_discard(struct its_device *dev, u32 id)
882{
883 struct its_cmd_desc desc;
884
885 desc.its_discard_cmd.dev = dev;
886 desc.its_discard_cmd.event_id = id;
887
888 its_send_single_command(dev->its, its_build_discard_cmd, &desc);
889}
890
891static void its_send_invall(struct its_node *its, struct its_collection *col)
892{
893 struct its_cmd_desc desc;
894
895 desc.its_invall_cmd.col = col;
896
897 its_send_single_command(its, its_build_invall_cmd, &desc);
898}
Marc Zyngierc48ed512014-11-24 14:35:12 +0000899
Marc Zyngierd011e4e2016-12-20 09:44:41 +0000900static void its_send_vmapti(struct its_device *dev, u32 id)
901{
902 struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
903 struct its_cmd_desc desc;
904
905 desc.its_vmapti_cmd.vpe = map->vpe;
906 desc.its_vmapti_cmd.dev = dev;
907 desc.its_vmapti_cmd.virt_id = map->vintid;
908 desc.its_vmapti_cmd.event_id = id;
909 desc.its_vmapti_cmd.db_enabled = map->db_enabled;
910
911 its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
912}
913
914static void its_send_vmovi(struct its_device *dev, u32 id)
915{
916 struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
917 struct its_cmd_desc desc;
918
919 desc.its_vmovi_cmd.vpe = map->vpe;
920 desc.its_vmovi_cmd.dev = dev;
921 desc.its_vmovi_cmd.event_id = id;
922 desc.its_vmovi_cmd.db_enabled = map->db_enabled;
923
924 its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
925}
926
Marc Zyngier75fd9512017-10-08 18:46:39 +0100927static void its_send_vmapp(struct its_node *its,
928 struct its_vpe *vpe, bool valid)
Marc Zyngiereb781922016-12-20 14:47:05 +0000929{
930 struct its_cmd_desc desc;
Marc Zyngiereb781922016-12-20 14:47:05 +0000931
932 desc.its_vmapp_cmd.vpe = vpe;
933 desc.its_vmapp_cmd.valid = valid;
Marc Zyngier75fd9512017-10-08 18:46:39 +0100934 desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];
Marc Zyngiereb781922016-12-20 14:47:05 +0000935
Marc Zyngier75fd9512017-10-08 18:46:39 +0100936 its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
Marc Zyngiereb781922016-12-20 14:47:05 +0000937}
938
Marc Zyngier3171a472016-12-20 15:17:28 +0000939static void its_send_vmovp(struct its_vpe *vpe)
940{
941 struct its_cmd_desc desc;
942 struct its_node *its;
943 unsigned long flags;
944 int col_id = vpe->col_idx;
945
946 desc.its_vmovp_cmd.vpe = vpe;
947 desc.its_vmovp_cmd.its_list = (u16)its_list_map;
948
949 if (!its_list_map) {
950 its = list_first_entry(&its_nodes, struct its_node, entry);
951 desc.its_vmovp_cmd.seq_num = 0;
952 desc.its_vmovp_cmd.col = &its->collections[col_id];
953 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
954 return;
955 }
956
957 /*
958 * Yet another marvel of the architecture. If using the
959 * its_list "feature", we need to make sure that all ITSs
960 * receive all VMOVP commands in the same order. The only way
961 * to guarantee this is to make vmovp a serialization point.
962 *
963 * Wall <-- Head.
964 */
965 raw_spin_lock_irqsave(&vmovp_lock, flags);
966
967 desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
968
969 /* Emit VMOVPs */
970 list_for_each_entry(its, &its_nodes, entry) {
971 if (!its->is_v4)
972 continue;
973
Marc Zyngier2247e1b2017-10-08 18:50:36 +0100974 if (!vpe->its_vm->vlpi_count[its->list_nr])
975 continue;
976
Marc Zyngier3171a472016-12-20 15:17:28 +0000977 desc.its_vmovp_cmd.col = &its->collections[col_id];
978 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
979 }
980
981 raw_spin_unlock_irqrestore(&vmovp_lock, flags);
982}
983
Marc Zyngier40619a22017-10-08 15:16:09 +0100984static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
Marc Zyngiereb781922016-12-20 14:47:05 +0000985{
986 struct its_cmd_desc desc;
Marc Zyngiereb781922016-12-20 14:47:05 +0000987
988 desc.its_vinvall_cmd.vpe = vpe;
Marc Zyngier40619a22017-10-08 15:16:09 +0100989 its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
Marc Zyngiereb781922016-12-20 14:47:05 +0000990}
991
Marc Zyngierc48ed512014-11-24 14:35:12 +0000992/*
993 * irqchip functions - assumes MSI, mostly.
994 */
995
996static inline u32 its_get_event_id(struct irq_data *d)
997{
998 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
Marc Zyngier591e5be2015-07-17 10:46:42 +0100999 return d->hwirq - its_dev->event_map.lpi_base;
Marc Zyngierc48ed512014-11-24 14:35:12 +00001000}
1001
Marc Zyngier015ec032016-12-20 09:54:57 +00001002static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
Marc Zyngierc48ed512014-11-24 14:35:12 +00001003{
Marc Zyngier015ec032016-12-20 09:54:57 +00001004 irq_hw_number_t hwirq;
Marc Zyngieradcdb942016-12-19 19:18:13 +00001005 struct page *prop_page;
1006 u8 *cfg;
Marc Zyngierc48ed512014-11-24 14:35:12 +00001007
Marc Zyngier015ec032016-12-20 09:54:57 +00001008 if (irqd_is_forwarded_to_vcpu(d)) {
1009 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1010 u32 event = its_get_event_id(d);
1011
1012 prop_page = its_dev->event_map.vm->vprop_page;
1013 hwirq = its_dev->event_map.vlpi_maps[event].vintid;
1014 } else {
1015 prop_page = gic_rdists->prop_page;
1016 hwirq = d->hwirq;
1017 }
Marc Zyngieradcdb942016-12-19 19:18:13 +00001018
1019 cfg = page_address(prop_page) + hwirq - 8192;
1020 *cfg &= ~clr;
Marc Zyngier015ec032016-12-20 09:54:57 +00001021 *cfg |= set | LPI_PROP_GROUP1;
Marc Zyngierc48ed512014-11-24 14:35:12 +00001022
1023 /*
1024 * Make the above write visible to the redistributors.
1025 * And yes, we're flushing exactly: One. Single. Byte.
1026 * Humpf...
1027 */
1028 if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
Vladimir Murzin328191c2016-11-02 11:54:05 +00001029 gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
Marc Zyngierc48ed512014-11-24 14:35:12 +00001030 else
1031 dsb(ishst);
Marc Zyngier015ec032016-12-20 09:54:57 +00001032}
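/*
 * Example of the offset maths above: physical LPI 8192 (the first
 * valid LPI INTID) uses the first byte of the property table, so
 * for hwirq 8200 cfg points at byte 8. For a forwarded (GICv4)
 * interrupt the same arithmetic is applied to the guest's property
 * page (vprop_page), using the vintid instead of d->hwirq.
 */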
1033
1034static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
1035{
1036 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1037
1038 lpi_write_config(d, clr, set);
Marc Zyngieradcdb942016-12-19 19:18:13 +00001039 its_send_inv(its_dev, its_get_event_id(d));
Marc Zyngierc48ed512014-11-24 14:35:12 +00001040}
1041
Marc Zyngier015ec032016-12-20 09:54:57 +00001042static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
1043{
1044 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1045 u32 event = its_get_event_id(d);
1046
1047 if (its_dev->event_map.vlpi_maps[event].db_enabled == enable)
1048 return;
1049
1050 its_dev->event_map.vlpi_maps[event].db_enabled = enable;
1051
1052 /*
1053 * More fun with the architecture:
1054 *
1055 * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
1056 * value or to 1023, depending on the enable bit. But that
1057 * would be issuing a mapping for an /existing/ DevID+EventID
1058 * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
1059 * to the /same/ vPE, using this opportunity to adjust the
1060 * doorbell. Mouahahahaha. We loves it, Precious.
1061 */
1062 its_send_vmovi(its_dev, event);
Marc Zyngierc48ed512014-11-24 14:35:12 +00001063}
1064
1065static void its_mask_irq(struct irq_data *d)
1066{
Marc Zyngier015ec032016-12-20 09:54:57 +00001067 if (irqd_is_forwarded_to_vcpu(d))
1068 its_vlpi_set_doorbell(d, false);
1069
Marc Zyngieradcdb942016-12-19 19:18:13 +00001070 lpi_update_config(d, LPI_PROP_ENABLED, 0);
Marc Zyngierc48ed512014-11-24 14:35:12 +00001071}
1072
1073static void its_unmask_irq(struct irq_data *d)
1074{
Marc Zyngier015ec032016-12-20 09:54:57 +00001075 if (irqd_is_forwarded_to_vcpu(d))
1076 its_vlpi_set_doorbell(d, true);
1077
Marc Zyngieradcdb942016-12-19 19:18:13 +00001078 lpi_update_config(d, 0, LPI_PROP_ENABLED);
Marc Zyngierc48ed512014-11-24 14:35:12 +00001079}
1080
Marc Zyngierc48ed512014-11-24 14:35:12 +00001081static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
1082 bool force)
1083{
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +02001084 unsigned int cpu;
1085 const struct cpumask *cpu_mask = cpu_online_mask;
Marc Zyngierc48ed512014-11-24 14:35:12 +00001086 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1087 struct its_collection *target_col;
1088 u32 id = its_get_event_id(d);
1089
Marc Zyngier015ec032016-12-20 09:54:57 +00001090 /* A forwarded interrupt should use irq_set_vcpu_affinity */
1091 if (irqd_is_forwarded_to_vcpu(d))
1092 return -EINVAL;
1093
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +02001094 /* lpi cannot be routed to a redistributor that is on a foreign node */
1095 if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
1096 if (its_dev->its->numa_node >= 0) {
1097 cpu_mask = cpumask_of_node(its_dev->its->numa_node);
1098 if (!cpumask_intersects(mask_val, cpu_mask))
1099 return -EINVAL;
1100 }
1101 }
1102
1103 cpu = cpumask_any_and(mask_val, cpu_mask);
1104
Marc Zyngierc48ed512014-11-24 14:35:12 +00001105 if (cpu >= nr_cpu_ids)
1106 return -EINVAL;
1107
MaJun8b8d94a2017-05-18 16:19:13 +08001108 /* don't set the affinity when the target cpu is the same as the current one */
1109 if (cpu != its_dev->event_map.col_map[id]) {
1110 target_col = &its_dev->its->collections[cpu];
1111 its_send_movi(its_dev, target_col, id);
1112 its_dev->event_map.col_map[id] = cpu;
Marc Zyngier0d224d32017-08-18 09:39:18 +01001113 irq_data_update_effective_affinity(d, cpumask_of(cpu));
MaJun8b8d94a2017-05-18 16:19:13 +08001114 }
Marc Zyngierc48ed512014-11-24 14:35:12 +00001115
1116 return IRQ_SET_MASK_OK_DONE;
1117}
1118
Ard Biesheuvel558b0162017-10-17 17:55:56 +01001119static u64 its_irq_get_msi_base(struct its_device *its_dev)
1120{
1121 struct its_node *its = its_dev->its;
1122
1123 return its->phys_base + GITS_TRANSLATER;
1124}
1125
Marc Zyngierb48ac832014-11-24 14:35:16 +00001126static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
1127{
1128 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1129 struct its_node *its;
1130 u64 addr;
1131
1132 its = its_dev->its;
Ard Biesheuvel558b0162017-10-17 17:55:56 +01001133 addr = its->get_msi_base(its_dev);
Marc Zyngierb48ac832014-11-24 14:35:16 +00001134
Vladimir Murzinb11283e2016-11-02 11:54:03 +00001135 msg->address_lo = lower_32_bits(addr);
1136 msg->address_hi = upper_32_bits(addr);
Marc Zyngierb48ac832014-11-24 14:35:16 +00001137 msg->data = its_get_event_id(d);
Robin Murphy44bb7e22016-09-12 17:13:59 +01001138
1139 iommu_dma_map_msi_msg(d->irq, msg);
Marc Zyngierb48ac832014-11-24 14:35:16 +00001140}
1141
Marc Zyngier8d85dce2016-12-19 18:02:13 +00001142static int its_irq_set_irqchip_state(struct irq_data *d,
1143 enum irqchip_irq_state which,
1144 bool state)
1145{
1146 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1147 u32 event = its_get_event_id(d);
1148
1149 if (which != IRQCHIP_STATE_PENDING)
1150 return -EINVAL;
1151
1152 if (state)
1153 its_send_int(its_dev, event);
1154 else
1155 its_send_clear(its_dev, event);
1156
1157 return 0;
1158}
1159
Marc Zyngier2247e1b2017-10-08 18:50:36 +01001160static void its_map_vm(struct its_node *its, struct its_vm *vm)
1161{
1162 unsigned long flags;
1163
1164 /* Not using the ITS list? Everything is always mapped. */
1165 if (!its_list_map)
1166 return;
1167
1168 raw_spin_lock_irqsave(&vmovp_lock, flags);
1169
1170 /*
1171 * If the VM wasn't mapped yet, iterate over the vpes and get
1172 * them mapped now.
1173 */
1174 vm->vlpi_count[its->list_nr]++;
1175
1176 if (vm->vlpi_count[its->list_nr] == 1) {
1177 int i;
1178
1179 for (i = 0; i < vm->nr_vpes; i++) {
1180 struct its_vpe *vpe = vm->vpes[i];
1181
1182 /* Map the VPE to the first possible CPU */
1183 vpe->col_idx = cpumask_first(cpu_online_mask);
1184 its_send_vmapp(its, vpe, true);
1185 its_send_vinvall(its, vpe);
1186 }
1187 }
1188
1189 raw_spin_unlock_irqrestore(&vmovp_lock, flags);
1190}
1191
1192static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
1193{
1194 unsigned long flags;
1195
1196 /* Not using the ITS list? Everything is always mapped. */
1197 if (!its_list_map)
1198 return;
1199
1200 raw_spin_lock_irqsave(&vmovp_lock, flags);
1201
1202 if (!--vm->vlpi_count[its->list_nr]) {
1203 int i;
1204
1205 for (i = 0; i < vm->nr_vpes; i++)
1206 its_send_vmapp(its, vm->vpes[i], false);
1207 }
1208
1209 raw_spin_unlock_irqrestore(&vmovp_lock, flags);
1210}
1211
Marc Zyngierd011e4e2016-12-20 09:44:41 +00001212static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
1213{
1214 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1215 u32 event = its_get_event_id(d);
1216 int ret = 0;
1217
1218 if (!info->map)
1219 return -EINVAL;
1220
1221 mutex_lock(&its_dev->event_map.vlpi_lock);
1222
1223 if (!its_dev->event_map.vm) {
1224 struct its_vlpi_map *maps;
1225
1226 maps = kzalloc(sizeof(*maps) * its_dev->event_map.nr_lpis,
1227 GFP_KERNEL);
1228 if (!maps) {
1229 ret = -ENOMEM;
1230 goto out;
1231 }
1232
1233 its_dev->event_map.vm = info->map->vm;
1234 its_dev->event_map.vlpi_maps = maps;
1235 } else if (its_dev->event_map.vm != info->map->vm) {
1236 ret = -EINVAL;
1237 goto out;
1238 }
1239
1240 /* Get our private copy of the mapping information */
1241 its_dev->event_map.vlpi_maps[event] = *info->map;
1242
1243 if (irqd_is_forwarded_to_vcpu(d)) {
1244 /* Already mapped, move it around */
1245 its_send_vmovi(its_dev, event);
1246 } else {
Marc Zyngier2247e1b2017-10-08 18:50:36 +01001247 /* Ensure all the VPEs are mapped on this ITS */
1248 its_map_vm(its_dev->its, info->map->vm);
1249
Marc Zyngierd011e4e2016-12-20 09:44:41 +00001250 /* Drop the physical mapping */
1251 its_send_discard(its_dev, event);
1252
1253 /* and install the virtual one */
1254 its_send_vmapti(its_dev, event);
1255 irqd_set_forwarded_to_vcpu(d);
1256
1257 /* Increment the number of VLPIs */
1258 its_dev->event_map.nr_vlpis++;
1259 }
1260
1261out:
1262 mutex_unlock(&its_dev->event_map.vlpi_lock);
1263 return ret;
1264}
1265
1266static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
1267{
1268 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1269 u32 event = its_get_event_id(d);
1270 int ret = 0;
1271
1272 mutex_lock(&its_dev->event_map.vlpi_lock);
1273
1274 if (!its_dev->event_map.vm ||
1275 !its_dev->event_map.vlpi_maps[event].vm) {
1276 ret = -EINVAL;
1277 goto out;
1278 }
1279
1280 /* Copy our mapping information to the incoming request */
1281 *info->map = its_dev->event_map.vlpi_maps[event];
1282
1283out:
1284 mutex_unlock(&its_dev->event_map.vlpi_lock);
1285 return ret;
1286}
1287
1288static int its_vlpi_unmap(struct irq_data *d)
1289{
1290 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1291 u32 event = its_get_event_id(d);
1292 int ret = 0;
1293
1294 mutex_lock(&its_dev->event_map.vlpi_lock);
1295
1296 if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
1297 ret = -EINVAL;
1298 goto out;
1299 }
1300
1301 /* Drop the virtual mapping */
1302 its_send_discard(its_dev, event);
1303
1304 /* and restore the physical one */
1305 irqd_clr_forwarded_to_vcpu(d);
1306 its_send_mapti(its_dev, d->hwirq, event);
1307 lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO |
1308 LPI_PROP_ENABLED |
1309 LPI_PROP_GROUP1));
1310
Marc Zyngier2247e1b2017-10-08 18:50:36 +01001311 /* Potentially unmap the VM from this ITS */
1312 its_unmap_vm(its_dev->its, its_dev->event_map.vm);
1313
Marc Zyngierd011e4e2016-12-20 09:44:41 +00001314 /*
1315 * Drop the refcount and make the device available again if
1316 * this was the last VLPI.
1317 */
1318 if (!--its_dev->event_map.nr_vlpis) {
1319 its_dev->event_map.vm = NULL;
1320 kfree(its_dev->event_map.vlpi_maps);
1321 }
1322
1323out:
1324 mutex_unlock(&its_dev->event_map.vlpi_lock);
1325 return ret;
1326}
1327
Marc Zyngier015ec032016-12-20 09:54:57 +00001328static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
1329{
1330 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1331
1332 if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
1333 return -EINVAL;
1334
1335 if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI)
1336 lpi_update_config(d, 0xff, info->config);
1337 else
1338 lpi_write_config(d, 0xff, info->config);
1339 its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));
1340
1341 return 0;
1342}
1343
Marc Zyngierc808eea2016-12-20 09:31:20 +00001344static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
1345{
1346 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1347 struct its_cmd_info *info = vcpu_info;
1348
1349 /* Need a v4 ITS */
Marc Zyngierd011e4e2016-12-20 09:44:41 +00001350 if (!its_dev->its->is_v4)
Marc Zyngierc808eea2016-12-20 09:31:20 +00001351 return -EINVAL;
1352
Marc Zyngierd011e4e2016-12-20 09:44:41 +00001353 /* Unmap request? */
1354 if (!info)
1355 return its_vlpi_unmap(d);
1356
Marc Zyngierc808eea2016-12-20 09:31:20 +00001357 switch (info->cmd_type) {
1358 case MAP_VLPI:
Marc Zyngierd011e4e2016-12-20 09:44:41 +00001359 return its_vlpi_map(d, info);
Marc Zyngierc808eea2016-12-20 09:31:20 +00001360
1361 case GET_VLPI:
Marc Zyngierd011e4e2016-12-20 09:44:41 +00001362 return its_vlpi_get(d, info);
Marc Zyngierc808eea2016-12-20 09:31:20 +00001363
1364 case PROP_UPDATE_VLPI:
1365 case PROP_UPDATE_AND_INV_VLPI:
Marc Zyngier015ec032016-12-20 09:54:57 +00001366 return its_vlpi_prop_update(d, info);
Marc Zyngierc808eea2016-12-20 09:31:20 +00001367
1368 default:
1369 return -EINVAL;
1370 }
1371}
1372
Marc Zyngierc48ed512014-11-24 14:35:12 +00001373static struct irq_chip its_irq_chip = {
1374 .name = "ITS",
1375 .irq_mask = its_mask_irq,
1376 .irq_unmask = its_unmask_irq,
Ashok Kumar004fa082016-02-11 05:38:53 -08001377 .irq_eoi = irq_chip_eoi_parent,
Marc Zyngierc48ed512014-11-24 14:35:12 +00001378 .irq_set_affinity = its_set_affinity,
Marc Zyngierb48ac832014-11-24 14:35:16 +00001379 .irq_compose_msi_msg = its_irq_compose_msi_msg,
Marc Zyngier8d85dce2016-12-19 18:02:13 +00001380 .irq_set_irqchip_state = its_irq_set_irqchip_state,
Marc Zyngierc808eea2016-12-20 09:31:20 +00001381 .irq_set_vcpu_affinity = its_irq_set_vcpu_affinity,
Marc Zyngierb48ac832014-11-24 14:35:16 +00001382};
1383
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001384/*
1385 * How we allocate LPIs:
1386 *
1387 * The GIC has id_bits bits for interrupt identifiers. From there, we
1388 * must subtract 8192 which are reserved for SGIs/PPIs/SPIs. Then, as
1389 * we allocate LPIs by chunks of 32, we can shift the whole thing by 5
1390 * bits to the right.
1391 *
1392 * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations.
1393 */
1394#define IRQS_PER_CHUNK_SHIFT 5
1395#define IRQS_PER_CHUNK (1 << IRQS_PER_CHUNK_SHIFT)
Shanker Donthineni6c31e122017-06-22 18:19:14 -05001396#define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */
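/*
 * Worked example of the chunk arithmetic (illustrative only): with
 * id_bits capped at ITS_MAX_LPI_NRBITS == 16, there are
 * ((1UL << 16) - 8192) >> 5 = 1792 chunks of 32 LPIs. Chunk 0 maps
 * to LPIs 8192-8223, chunk 1 to 8224-8255, and so on.
 */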
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001397
1398static unsigned long *lpi_bitmap;
1399static u32 lpi_chunks;
1400static DEFINE_SPINLOCK(lpi_lock);
1401
1402static int its_lpi_to_chunk(int lpi)
1403{
1404 return (lpi - 8192) >> IRQS_PER_CHUNK_SHIFT;
1405}
1406
1407static int its_chunk_to_lpi(int chunk)
1408{
1409 return (chunk << IRQS_PER_CHUNK_SHIFT) + 8192;
1410}
1411
Tomasz Nowicki04a0e4d2016-01-19 14:11:18 +01001412static int __init its_lpi_init(u32 id_bits)
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001413{
1414 lpi_chunks = its_lpi_to_chunk(1UL << id_bits);
1415
1416 lpi_bitmap = kzalloc(BITS_TO_LONGS(lpi_chunks) * sizeof(long),
1417 GFP_KERNEL);
1418 if (!lpi_bitmap) {
1419 lpi_chunks = 0;
1420 return -ENOMEM;
1421 }
1422
1423 pr_info("ITS: Allocated %d chunks for LPIs\n", (int)lpi_chunks);
1424 return 0;
1425}
1426
1427static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids)
1428{
1429 unsigned long *bitmap = NULL;
1430 int chunk_id;
1431 int nr_chunks;
1432 int i;
1433
1434 nr_chunks = DIV_ROUND_UP(nr_irqs, IRQS_PER_CHUNK);
1435
1436 spin_lock(&lpi_lock);
1437
1438 do {
1439 chunk_id = bitmap_find_next_zero_area(lpi_bitmap, lpi_chunks,
1440 0, nr_chunks, 0);
1441 if (chunk_id < lpi_chunks)
1442 break;
1443
1444 nr_chunks--;
1445 } while (nr_chunks > 0);
1446
1447 if (!nr_chunks)
1448 goto out;
1449
1450 bitmap = kzalloc(BITS_TO_LONGS(nr_chunks * IRQS_PER_CHUNK) * sizeof (long),
1451 GFP_ATOMIC);
1452 if (!bitmap)
1453 goto out;
1454
1455 for (i = 0; i < nr_chunks; i++)
1456 set_bit(chunk_id + i, lpi_bitmap);
1457
1458 *base = its_chunk_to_lpi(chunk_id);
1459 *nr_ids = nr_chunks * IRQS_PER_CHUNK;
1460
1461out:
1462 spin_unlock(&lpi_lock);
1463
Marc Zyngierc8415b92015-10-02 16:44:05 +01001464 if (!bitmap)
1465 *base = *nr_ids = 0;
1466
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001467 return bitmap;
1468}
1469
Marc Zyngiercf2be8b2016-12-19 18:49:59 +00001470static void its_lpi_free_chunks(unsigned long *bitmap, int base, int nr_ids)
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001471{
1472 int lpi;
1473
1474 spin_lock(&lpi_lock);
1475
1476 for (lpi = base; lpi < (base + nr_ids); lpi += IRQS_PER_CHUNK) {
1477 int chunk = its_lpi_to_chunk(lpi);
Marc Zyngiercf2be8b2016-12-19 18:49:59 +00001478
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001479 BUG_ON(chunk > lpi_chunks);
1480 if (test_bit(chunk, lpi_bitmap)) {
1481 clear_bit(chunk, lpi_bitmap);
1482 } else {
1483 pr_err("Bad LPI chunk %d\n", chunk);
1484 }
1485 }
1486
1487 spin_unlock(&lpi_lock);
1488
Marc Zyngiercf2be8b2016-12-19 18:49:59 +00001489 kfree(bitmap);
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001490}
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001491
Marc Zyngier0e5ccf92016-12-19 18:15:05 +00001492static struct page *its_allocate_prop_table(gfp_t gfp_flags)
1493{
1494 struct page *prop_page;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001495
Marc Zyngier0e5ccf92016-12-19 18:15:05 +00001496 prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
1497 if (!prop_page)
1498 return NULL;
1499
1500 /* Priority 0xa0, Group-1, disabled */
1501 memset(page_address(prop_page),
1502 LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1,
1503 LPI_PROPBASE_SZ);
1504
1505 /* Make sure the GIC will observe the written configuration */
1506 gic_flush_dcache_to_poc(page_address(prop_page), LPI_PROPBASE_SZ);
1507
1508 return prop_page;
1509}
1510
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00001511static void its_free_prop_table(struct page *prop_page)
1512{
1513 free_pages((unsigned long)page_address(prop_page),
1514 get_order(LPI_PROPBASE_SZ));
1515}
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001516
1517static int __init its_alloc_lpi_tables(void)
1518{
1519 phys_addr_t paddr;
1520
Shanker Donthineni6c31e122017-06-22 18:19:14 -05001521 lpi_id_bits = min_t(u32, gic_rdists->id_bits, ITS_MAX_LPI_NRBITS);
Marc Zyngier0e5ccf92016-12-19 18:15:05 +00001522 gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001523 if (!gic_rdists->prop_page) {
1524 pr_err("Failed to allocate PROPBASE\n");
1525 return -ENOMEM;
1526 }
1527
1528 paddr = page_to_phys(gic_rdists->prop_page);
1529 pr_info("GIC: using LPI property table @%pa\n", &paddr);
1530
Shanker Donthineni6c31e122017-06-22 18:19:14 -05001531 return its_lpi_init(lpi_id_bits);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001532}
1533
1534static const char *its_base_type_string[] = {
1535 [GITS_BASER_TYPE_DEVICE] = "Devices",
1536 [GITS_BASER_TYPE_VCPU] = "Virtual CPUs",
Marc Zyngier4f46de92016-12-20 15:50:14 +00001537 [GITS_BASER_TYPE_RESERVED3] = "Reserved (3)",
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001538 [GITS_BASER_TYPE_COLLECTION] = "Interrupt Collections",
1539 [GITS_BASER_TYPE_RESERVED5] = "Reserved (5)",
1540 [GITS_BASER_TYPE_RESERVED6] = "Reserved (6)",
1541 [GITS_BASER_TYPE_RESERVED7] = "Reserved (7)",
1542};
1543
Shanker Donthineni2d81d422016-06-06 18:17:28 -05001544static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
1545{
1546 u32 idx = baser - its->tables;
1547
Vladimir Murzin0968a612016-11-02 11:54:06 +00001548 return gits_read_baser(its->base + GITS_BASER + (idx << 3));
Shanker Donthineni2d81d422016-06-06 18:17:28 -05001549}
1550
1551static void its_write_baser(struct its_node *its, struct its_baser *baser,
1552 u64 val)
1553{
1554 u32 idx = baser - its->tables;
1555
Vladimir Murzin0968a612016-11-02 11:54:06 +00001556 gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
Shanker Donthineni2d81d422016-06-06 18:17:28 -05001557 baser->val = its_read_baser(its, baser);
1558}
1559
Shanker Donthineni93473592016-06-06 18:17:30 -05001560static int its_setup_baser(struct its_node *its, struct its_baser *baser,
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001561 u64 cache, u64 shr, u32 psz, u32 order,
1562 bool indirect)
Shanker Donthineni93473592016-06-06 18:17:30 -05001563{
1564 u64 val = its_read_baser(its, baser);
1565 u64 esz = GITS_BASER_ENTRY_SIZE(val);
1566 u64 type = GITS_BASER_TYPE(val);
1567 u32 alloc_pages;
1568 void *base;
1569 u64 tmp;
1570
1571retry_alloc_baser:
1572 alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
1573 if (alloc_pages > GITS_BASER_PAGES_MAX) {
1574 pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
1575 &its->phys_base, its_base_type_string[type],
1576 alloc_pages, GITS_BASER_PAGES_MAX);
1577 alloc_pages = GITS_BASER_PAGES_MAX;
1578 order = get_order(GITS_BASER_PAGES_MAX * psz);
1579 }
1580
1581 base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
1582 if (!base)
1583 return -ENOMEM;
1584
1585retry_baser:
1586 val = (virt_to_phys(base) |
1587 (type << GITS_BASER_TYPE_SHIFT) |
1588 ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
1589 ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) |
1590 cache |
1591 shr |
1592 GITS_BASER_VALID);
1593
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001594 val |= indirect ? GITS_BASER_INDIRECT : 0x0;
1595
Shanker Donthineni93473592016-06-06 18:17:30 -05001596 switch (psz) {
1597 case SZ_4K:
1598 val |= GITS_BASER_PAGE_SIZE_4K;
1599 break;
1600 case SZ_16K:
1601 val |= GITS_BASER_PAGE_SIZE_16K;
1602 break;
1603 case SZ_64K:
1604 val |= GITS_BASER_PAGE_SIZE_64K;
1605 break;
1606 }
1607
1608 its_write_baser(its, baser, val);
1609 tmp = baser->val;
1610
1611 if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
1612 /*
1613 * Shareability didn't stick. Just use
1614 * whatever the read reported, which is likely
1615		 * to be the only thing this ITS
1616 * supports. If that's zero, make it
1617 * non-cacheable as well.
1618 */
1619 shr = tmp & GITS_BASER_SHAREABILITY_MASK;
1620 if (!shr) {
1621 cache = GITS_BASER_nC;
Vladimir Murzin328191c2016-11-02 11:54:05 +00001622 gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
Shanker Donthineni93473592016-06-06 18:17:30 -05001623 }
1624 goto retry_baser;
1625 }
1626
1627 if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
1628 /*
1629 * Page size didn't stick. Let's try a smaller
1630 * size and retry. If we reach 4K, then
1631 * something is horribly wrong...
1632 */
1633 free_pages((unsigned long)base, order);
1634 baser->base = NULL;
1635
1636 switch (psz) {
1637 case SZ_16K:
1638 psz = SZ_4K;
1639 goto retry_alloc_baser;
1640 case SZ_64K:
1641 psz = SZ_16K;
1642 goto retry_alloc_baser;
1643 }
1644 }
1645
1646 if (val != tmp) {
Vladimir Murzinb11283e2016-11-02 11:54:03 +00001647 pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
Shanker Donthineni93473592016-06-06 18:17:30 -05001648 &its->phys_base, its_base_type_string[type],
Vladimir Murzinb11283e2016-11-02 11:54:03 +00001649 val, tmp);
Shanker Donthineni93473592016-06-06 18:17:30 -05001650 free_pages((unsigned long)base, order);
1651 return -ENXIO;
1652 }
1653
1654 baser->order = order;
1655 baser->base = base;
1656 baser->psz = psz;
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001657 tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;
Shanker Donthineni93473592016-06-06 18:17:30 -05001658
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001659 pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
Vladimir Murzind524eaa2016-11-02 11:54:04 +00001660 &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
Shanker Donthineni93473592016-06-06 18:17:30 -05001661 its_base_type_string[type],
1662 (unsigned long)virt_to_phys(base),
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001663 indirect ? "indirect" : "flat", (int)esz,
Shanker Donthineni93473592016-06-06 18:17:30 -05001664 psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
1665
1666 return 0;
1667}
1668
Marc Zyngier4cacac52016-12-19 18:18:34 +00001669static bool its_parse_indirect_baser(struct its_node *its,
1670 struct its_baser *baser,
1671 u32 psz, u32 *order)
Shanker Donthineni4b75c452016-06-06 18:17:29 -05001672{
Marc Zyngier4cacac52016-12-19 18:18:34 +00001673 u64 tmp = its_read_baser(its, baser);
1674 u64 type = GITS_BASER_TYPE(tmp);
1675 u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
Shanker Donthineni2fd632a2017-01-25 21:51:41 -06001676 u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
Shanker Donthineni4b75c452016-06-06 18:17:29 -05001677 u32 ids = its->device_ids;
1678 u32 new_order = *order;
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001679 bool indirect = false;
1680
1681 /* No need to enable Indirection if memory requirement < (psz*2)bytes */
1682 if ((esz << ids) > (psz * 2)) {
1683 /*
1684		 * Find out whether hw supports a single or two-level table by
1685		 * reading bit at offset '62' after writing '1' to it.
1686 */
1687 its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
1688 indirect = !!(baser->val & GITS_BASER_INDIRECT);
1689
1690 if (indirect) {
1691 /*
1692			 * The size of a lvl2 table is equal to the ITS page size,
1693			 * which is 'psz'. To compute the lvl1 table size, subtract
1694			 * the ID bits covered by one lvl2 table from 'ids' (as
1695			 * reported by the ITS hardware), then multiply by the lvl1
1696			 * table entry size.
1697 */
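			/*
			 * Illustrative example (not from the GIC spec): with a
			 * 64kB ITS page, 8-byte entries and ids == 20, one lvl2
			 * page covers 8192 IDs (13 bits), so the lvl1 table only
			 * needs 2^(20 - 13) entries of GITS_LVL1_ENTRY_SIZE bytes
			 * instead of a flat 8MB table.
			 */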
Vladimir Murzind524eaa2016-11-02 11:54:04 +00001698 ids -= ilog2(psz / (int)esz);
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001699 esz = GITS_LVL1_ENTRY_SIZE;
1700 }
1701 }
Shanker Donthineni4b75c452016-06-06 18:17:29 -05001702
1703 /*
1704 * Allocate as many entries as required to fit the
1705 * range of device IDs that the ITS can grok... The ID
1706 * space being incredibly sparse, this results in a
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001707	 * massive waste of memory if the two-level device table
1708	 * feature is not supported by the hardware.
Shanker Donthineni4b75c452016-06-06 18:17:29 -05001709 */
1710 new_order = max_t(u32, get_order(esz << ids), new_order);
1711 if (new_order >= MAX_ORDER) {
1712 new_order = MAX_ORDER - 1;
Vladimir Murzind524eaa2016-11-02 11:54:04 +00001713 ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
Marc Zyngier4cacac52016-12-19 18:18:34 +00001714 pr_warn("ITS@%pa: %s Table too large, reduce ids %u->%u\n",
1715 &its->phys_base, its_base_type_string[type],
1716 its->device_ids, ids);
Shanker Donthineni4b75c452016-06-06 18:17:29 -05001717 }
1718
1719 *order = new_order;
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001720
1721 return indirect;
Shanker Donthineni4b75c452016-06-06 18:17:29 -05001722}
1723
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001724static void its_free_tables(struct its_node *its)
1725{
1726 int i;
1727
1728 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
Shanker Donthineni1a485f42016-02-01 20:19:44 -06001729 if (its->tables[i].base) {
1730 free_pages((unsigned long)its->tables[i].base,
1731 its->tables[i].order);
1732 its->tables[i].base = NULL;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001733 }
1734 }
1735}
1736
Shanker Donthineni0e0b0f62016-06-06 18:17:31 -05001737static int its_alloc_tables(struct its_node *its)
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001738{
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001739 u64 shr = GITS_BASER_InnerShareable;
Shanker Donthineni2fd632a2017-01-25 21:51:41 -06001740 u64 cache = GITS_BASER_RaWaWb;
Shanker Donthineni93473592016-06-06 18:17:30 -05001741 u32 psz = SZ_64K;
1742 int err, i;
Robert Richter94100972015-09-21 22:58:38 +02001743
Ard Biesheuvelfa150012017-10-17 17:55:54 +01001744 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)
1745 /* erratum 24313: ignore memory access type */
1746 cache = GITS_BASER_nCnB;
Shanker Donthineni466b7d12016-03-09 22:10:49 -06001747
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001748 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
Shanker Donthineni2d81d422016-06-06 18:17:28 -05001749 struct its_baser *baser = its->tables + i;
1750 u64 val = its_read_baser(its, baser);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001751 u64 type = GITS_BASER_TYPE(val);
Shanker Donthineni93473592016-06-06 18:17:30 -05001752 u32 order = get_order(psz);
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001753 bool indirect = false;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001754
Marc Zyngier4cacac52016-12-19 18:18:34 +00001755 switch (type) {
1756 case GITS_BASER_TYPE_NONE:
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001757 continue;
1758
Marc Zyngier4cacac52016-12-19 18:18:34 +00001759 case GITS_BASER_TYPE_DEVICE:
1760 case GITS_BASER_TYPE_VCPU:
1761 indirect = its_parse_indirect_baser(its, baser,
1762 psz, &order);
1763 break;
1764 }
Marc Zyngierf54b97e2015-03-06 16:37:41 +00001765
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001766 err = its_setup_baser(its, baser, cache, shr, psz, order, indirect);
Shanker Donthineni93473592016-06-06 18:17:30 -05001767 if (err < 0) {
1768 its_free_tables(its);
1769 return err;
Robert Richter30f21362015-09-21 22:58:34 +02001770 }
1771
Shanker Donthineni93473592016-06-06 18:17:30 -05001772 /* Update settings which will be used for next BASERn */
1773 psz = baser->psz;
1774 cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
1775 shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001776 }
1777
1778 return 0;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001779}
1780
1781static int its_alloc_collections(struct its_node *its)
1782{
1783 its->collections = kzalloc(nr_cpu_ids * sizeof(*its->collections),
1784 GFP_KERNEL);
1785 if (!its->collections)
1786 return -ENOMEM;
1787
1788 return 0;
1789}
1790
Marc Zyngier7c297a22016-12-19 18:34:38 +00001791static struct page *its_allocate_pending_table(gfp_t gfp_flags)
1792{
1793 struct page *pend_page;
1794 /*
1795 * The pending pages have to be at least 64kB aligned,
1796 * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below.
1797 */
1798 pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
1799 get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
1800 if (!pend_page)
1801 return NULL;
1802
1803 /* Make sure the GIC will observe the zero-ed page */
1804 gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);
1805
1806 return pend_page;
1807}
1808
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00001809static void its_free_pending_table(struct page *pt)
1810{
1811 free_pages((unsigned long)page_address(pt),
1812 get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
1813}
1814
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001815static void its_cpu_init_lpis(void)
1816{
1817 void __iomem *rbase = gic_data_rdist_rd_base();
1818 struct page *pend_page;
1819 u64 val, tmp;
1820
1821 /* If we didn't allocate the pending table yet, do it now */
1822 pend_page = gic_data_rdist()->pend_page;
1823 if (!pend_page) {
1824 phys_addr_t paddr;
Marc Zyngier7c297a22016-12-19 18:34:38 +00001825
1826 pend_page = its_allocate_pending_table(GFP_NOWAIT);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001827 if (!pend_page) {
1828 pr_err("Failed to allocate PENDBASE for CPU%d\n",
1829 smp_processor_id());
1830 return;
1831 }
1832
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001833 paddr = page_to_phys(pend_page);
1834 pr_info("CPU%d: using LPI pending table @%pa\n",
1835 smp_processor_id(), &paddr);
1836 gic_data_rdist()->pend_page = pend_page;
1837 }
1838
1839 /* Disable LPIs */
1840 val = readl_relaxed(rbase + GICR_CTLR);
1841 val &= ~GICR_CTLR_ENABLE_LPIS;
1842 writel_relaxed(val, rbase + GICR_CTLR);
1843
1844 /*
1845 * Make sure any change to the table is observable by the GIC.
1846 */
1847 dsb(sy);
1848
1849 /* set PROPBASE */
1850 val = (page_to_phys(gic_rdists->prop_page) |
1851 GICR_PROPBASER_InnerShareable |
Shanker Donthineni2fd632a2017-01-25 21:51:41 -06001852 GICR_PROPBASER_RaWaWb |
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001853 ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
1854
Vladimir Murzin0968a612016-11-02 11:54:06 +00001855 gicr_write_propbaser(val, rbase + GICR_PROPBASER);
1856 tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001857
1858 if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
Marc Zyngier241a3862015-03-27 14:15:05 +00001859 if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
1860 /*
1861 * The HW reports non-shareable, we must
1862 * remove the cacheability attributes as
1863 * well.
1864 */
1865 val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
1866 GICR_PROPBASER_CACHEABILITY_MASK);
1867 val |= GICR_PROPBASER_nC;
Vladimir Murzin0968a612016-11-02 11:54:06 +00001868 gicr_write_propbaser(val, rbase + GICR_PROPBASER);
Marc Zyngier241a3862015-03-27 14:15:05 +00001869 }
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001870 pr_info_once("GIC: using cache flushing for LPI property table\n");
1871 gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
1872 }
1873
1874 /* set PENDBASE */
1875 val = (page_to_phys(pend_page) |
Marc Zyngier4ad3e362015-03-27 14:15:04 +00001876 GICR_PENDBASER_InnerShareable |
Shanker Donthineni2fd632a2017-01-25 21:51:41 -06001877 GICR_PENDBASER_RaWaWb);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001878
Vladimir Murzin0968a612016-11-02 11:54:06 +00001879 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
1880 tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
Marc Zyngier241a3862015-03-27 14:15:05 +00001881
1882 if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
1883 /*
1884 * The HW reports non-shareable, we must remove the
1885 * cacheability attributes as well.
1886 */
1887 val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
1888 GICR_PENDBASER_CACHEABILITY_MASK);
1889 val |= GICR_PENDBASER_nC;
Vladimir Murzin0968a612016-11-02 11:54:06 +00001890 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
Marc Zyngier241a3862015-03-27 14:15:05 +00001891 }
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001892
1893 /* Enable LPIs */
1894 val = readl_relaxed(rbase + GICR_CTLR);
1895 val |= GICR_CTLR_ENABLE_LPIS;
1896 writel_relaxed(val, rbase + GICR_CTLR);
1897
1898 /* Make sure the GIC has seen the above */
1899 dsb(sy);
1900}
1901
1902static void its_cpu_init_collection(void)
1903{
1904 struct its_node *its;
1905 int cpu;
1906
1907 spin_lock(&its_lock);
1908 cpu = smp_processor_id();
1909
1910 list_for_each_entry(its, &its_nodes, entry) {
1911 u64 target;
1912
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +02001913		/* avoid cross-node collections and their mapping */
1914 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
1915 struct device_node *cpu_node;
1916
1917 cpu_node = of_get_cpu_node(cpu, NULL);
1918 if (its->numa_node != NUMA_NO_NODE &&
1919 its->numa_node != of_node_to_nid(cpu_node))
1920 continue;
1921 }
1922
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001923 /*
1924 * We now have to bind each collection to its target
1925 * redistributor.
1926 */
Marc Zyngier589ce5f2016-10-14 15:13:07 +01001927 if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001928 /*
1929 * This ITS wants the physical address of the
1930 * redistributor.
1931 */
1932 target = gic_data_rdist()->phys_base;
1933 } else {
1934 /*
1935 * This ITS wants a linear CPU number.
1936 */
Marc Zyngier589ce5f2016-10-14 15:13:07 +01001937 target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
Marc Zyngier263fcd32015-03-27 14:15:02 +00001938 target = GICR_TYPER_CPU_NUMBER(target) << 16;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001939 }
1940
1941 /* Perform collection mapping */
1942 its->collections[cpu].target_address = target;
1943 its->collections[cpu].col_id = cpu;
1944
1945 its_send_mapc(its, &its->collections[cpu], 1);
1946 its_send_invall(its, &its->collections[cpu]);
1947 }
1948
1949 spin_unlock(&its_lock);
1950}
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00001951
1952static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
1953{
1954 struct its_device *its_dev = NULL, *tmp;
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00001955 unsigned long flags;
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00001956
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00001957 raw_spin_lock_irqsave(&its->lock, flags);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00001958
1959 list_for_each_entry(tmp, &its->its_device_list, entry) {
1960 if (tmp->device_id == dev_id) {
1961 its_dev = tmp;
1962 break;
1963 }
1964 }
1965
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00001966 raw_spin_unlock_irqrestore(&its->lock, flags);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00001967
1968 return its_dev;
1969}
1970
Shanker Donthineni466b7d12016-03-09 22:10:49 -06001971static struct its_baser *its_get_baser(struct its_node *its, u32 type)
1972{
1973 int i;
1974
1975 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
1976 if (GITS_BASER_TYPE(its->tables[i].val) == type)
1977 return &its->tables[i];
1978 }
1979
1980 return NULL;
1981}
1982
Marc Zyngier70cc81e2016-12-19 18:53:02 +00001983static bool its_alloc_table_entry(struct its_baser *baser, u32 id)
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001984{
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001985 struct page *page;
1986 u32 esz, idx;
1987 __le64 *table;
1988
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001989 /* Don't allow device id that exceeds single, flat table limit */
1990 esz = GITS_BASER_ENTRY_SIZE(baser->val);
1991 if (!(baser->val & GITS_BASER_INDIRECT))
Marc Zyngier70cc81e2016-12-19 18:53:02 +00001992 return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001993
1994 /* Compute 1st level table index & check if that exceeds table limit */
Marc Zyngier70cc81e2016-12-19 18:53:02 +00001995 idx = id >> ilog2(baser->psz / esz);
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001996 if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
1997 return false;
1998
1999 table = baser->base;
2000
2001 /* Allocate memory for 2nd level table */
2002 if (!table[idx]) {
2003 page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(baser->psz));
2004 if (!page)
2005 return false;
2006
2007 /* Flush Lvl2 table to PoC if hw doesn't support coherency */
2008 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
Vladimir Murzin328191c2016-11-02 11:54:05 +00002009 gic_flush_dcache_to_poc(page_address(page), baser->psz);
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002010
2011 table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
2012
2013 /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
2014 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
Vladimir Murzin328191c2016-11-02 11:54:05 +00002015 gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002016
2017 /* Ensure updated table contents are visible to ITS hardware */
2018 dsb(sy);
2019 }
2020
2021 return true;
2022}
2023
Marc Zyngier70cc81e2016-12-19 18:53:02 +00002024static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
2025{
2026 struct its_baser *baser;
2027
2028 baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
2029
2030 /* Don't allow device id that exceeds ITS hardware limit */
2031 if (!baser)
2032 return (ilog2(dev_id) < its->device_ids);
2033
2034 return its_alloc_table_entry(baser, dev_id);
2035}
2036
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00002037static bool its_alloc_vpe_table(u32 vpe_id)
2038{
2039 struct its_node *its;
2040
2041 /*
2042 * Make sure the L2 tables are allocated on *all* v4 ITSs. We
2043 * could try and only do it on ITSs corresponding to devices
2044 * that have interrupts targeted at this VPE, but the
2045 * complexity becomes crazy (and you have tons of memory
2046 * anyway, right?).
2047 */
2048 list_for_each_entry(its, &its_nodes, entry) {
2049 struct its_baser *baser;
2050
2051 if (!its->is_v4)
2052 continue;
2053
2054 baser = its_get_baser(its, GITS_BASER_TYPE_VCPU);
2055 if (!baser)
2056 return false;
2057
2058 if (!its_alloc_table_entry(baser, vpe_id))
2059 return false;
2060 }
2061
2062 return true;
2063}
2064
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002065static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002066 int nvecs, bool alloc_lpis)
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002067{
2068 struct its_device *dev;
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002069 unsigned long *lpi_map = NULL;
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002070 unsigned long flags;
Marc Zyngier591e5be2015-07-17 10:46:42 +01002071 u16 *col_map = NULL;
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002072 void *itt;
2073 int lpi_base;
2074 int nr_lpis;
Marc Zyngierc8481262014-12-12 10:51:24 +00002075 int nr_ites;
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002076 int sz;
2077
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002078 if (!its_alloc_device_table(its, dev_id))
Shanker Donthineni466b7d12016-03-09 22:10:49 -06002079 return NULL;
2080
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002081 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
Marc Zyngierc8481262014-12-12 10:51:24 +00002082 /*
2083 * At least one bit of EventID is being used, hence a minimum
2084 * of two entries. No, the architecture doesn't let you
2085 * express an ITT with a single entry.
2086 */
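	/* For example, nvecs == 1 still yields two ITEs, and nvecs == 5 rounds up to 8 */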
Will Deacon96555c42014-12-17 14:11:09 +00002087 nr_ites = max(2UL, roundup_pow_of_two(nvecs));
Marc Zyngierc8481262014-12-12 10:51:24 +00002088 sz = nr_ites * its->ite_size;
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002089 sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
Yun Wu6c834122015-03-06 16:37:46 +00002090 itt = kzalloc(sz, GFP_KERNEL);
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002091 if (alloc_lpis) {
2092 lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
2093 if (lpi_map)
2094 col_map = kzalloc(sizeof(*col_map) * nr_lpis,
2095 GFP_KERNEL);
2096 } else {
2097 col_map = kzalloc(sizeof(*col_map) * nr_ites, GFP_KERNEL);
2098 nr_lpis = 0;
2099 lpi_base = 0;
2100 }
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002101
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002102 if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002103 kfree(dev);
2104 kfree(itt);
2105 kfree(lpi_map);
Marc Zyngier591e5be2015-07-17 10:46:42 +01002106 kfree(col_map);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002107 return NULL;
2108 }
2109
Vladimir Murzin328191c2016-11-02 11:54:05 +00002110 gic_flush_dcache_to_poc(itt, sz);
Marc Zyngier5a9a8912015-09-13 12:14:32 +01002111
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002112 dev->its = its;
2113 dev->itt = itt;
Marc Zyngierc8481262014-12-12 10:51:24 +00002114 dev->nr_ites = nr_ites;
Marc Zyngier591e5be2015-07-17 10:46:42 +01002115 dev->event_map.lpi_map = lpi_map;
2116 dev->event_map.col_map = col_map;
2117 dev->event_map.lpi_base = lpi_base;
2118 dev->event_map.nr_lpis = nr_lpis;
Marc Zyngierd011e4e2016-12-20 09:44:41 +00002119 mutex_init(&dev->event_map.vlpi_lock);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002120 dev->device_id = dev_id;
2121 INIT_LIST_HEAD(&dev->entry);
2122
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002123 raw_spin_lock_irqsave(&its->lock, flags);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002124 list_add(&dev->entry, &its->its_device_list);
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002125 raw_spin_unlock_irqrestore(&its->lock, flags);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002126
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002127 /* Map device to its ITT */
2128 its_send_mapd(dev, 1);
2129
2130 return dev;
2131}
2132
2133static void its_free_device(struct its_device *its_dev)
2134{
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002135 unsigned long flags;
2136
2137 raw_spin_lock_irqsave(&its_dev->its->lock, flags);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002138 list_del(&its_dev->entry);
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002139 raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002140 kfree(its_dev->itt);
2141 kfree(its_dev);
2142}
Marc Zyngierb48ac832014-11-24 14:35:16 +00002143
2144static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
2145{
2146 int idx;
2147
Marc Zyngier591e5be2015-07-17 10:46:42 +01002148 idx = find_first_zero_bit(dev->event_map.lpi_map,
2149 dev->event_map.nr_lpis);
2150 if (idx == dev->event_map.nr_lpis)
Marc Zyngierb48ac832014-11-24 14:35:16 +00002151 return -ENOSPC;
2152
Marc Zyngier591e5be2015-07-17 10:46:42 +01002153 *hwirq = dev->event_map.lpi_base + idx;
2154 set_bit(idx, dev->event_map.lpi_map);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002155
Marc Zyngierb48ac832014-11-24 14:35:16 +00002156 return 0;
2157}
2158
Marc Zyngier54456db2015-07-28 14:46:21 +01002159static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
2160 int nvec, msi_alloc_info_t *info)
Marc Zyngiere8137f42015-03-06 16:37:42 +00002161{
Marc Zyngierb48ac832014-11-24 14:35:16 +00002162 struct its_node *its;
Marc Zyngierb48ac832014-11-24 14:35:16 +00002163 struct its_device *its_dev;
Marc Zyngier54456db2015-07-28 14:46:21 +01002164 struct msi_domain_info *msi_info;
2165 u32 dev_id;
Marc Zyngierb48ac832014-11-24 14:35:16 +00002166
Marc Zyngier54456db2015-07-28 14:46:21 +01002167 /*
2168	 * We ignore "dev" entirely, and rely on the dev_id that has
2169 * been passed via the scratchpad. This limits this domain's
2170 * usefulness to upper layers that definitely know that they
2171 * are built on top of the ITS.
2172 */
2173 dev_id = info->scratchpad[0].ul;
2174
2175 msi_info = msi_get_domain_info(domain);
2176 its = msi_info->data;
2177
Marc Zyngier20b3d542016-12-20 15:23:22 +00002178 if (!gic_rdists->has_direct_lpi &&
2179 vpe_proxy.dev &&
2180 vpe_proxy.dev->its == its &&
2181 dev_id == vpe_proxy.dev->device_id) {
2182 /* Bad luck. Get yourself a better implementation */
2183 WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n",
2184 dev_id);
2185 return -EINVAL;
2186 }
2187
Marc Zyngierf1304202015-07-28 14:46:18 +01002188 its_dev = its_find_device(its, dev_id);
Marc Zyngiere8137f42015-03-06 16:37:42 +00002189 if (its_dev) {
2190 /*
2191 * We already have seen this ID, probably through
2192 * another alias (PCI bridge of some sort). No need to
2193 * create the device.
2194 */
Marc Zyngierf1304202015-07-28 14:46:18 +01002195 pr_debug("Reusing ITT for devID %x\n", dev_id);
Marc Zyngiere8137f42015-03-06 16:37:42 +00002196 goto out;
2197 }
Marc Zyngierb48ac832014-11-24 14:35:16 +00002198
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002199 its_dev = its_create_device(its, dev_id, nvec, true);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002200 if (!its_dev)
2201 return -ENOMEM;
2202
Marc Zyngierf1304202015-07-28 14:46:18 +01002203 pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
Marc Zyngiere8137f42015-03-06 16:37:42 +00002204out:
Marc Zyngierb48ac832014-11-24 14:35:16 +00002205 info->scratchpad[0].ptr = its_dev;
Marc Zyngierb48ac832014-11-24 14:35:16 +00002206 return 0;
2207}
2208
Marc Zyngier54456db2015-07-28 14:46:21 +01002209static struct msi_domain_ops its_msi_domain_ops = {
2210 .msi_prepare = its_msi_prepare,
2211};
2212
Marc Zyngierb48ac832014-11-24 14:35:16 +00002213static int its_irq_gic_domain_alloc(struct irq_domain *domain,
2214 unsigned int virq,
2215 irq_hw_number_t hwirq)
2216{
Marc Zyngierf833f572015-10-13 12:51:33 +01002217 struct irq_fwspec fwspec;
Marc Zyngierb48ac832014-11-24 14:35:16 +00002218
Marc Zyngierf833f572015-10-13 12:51:33 +01002219 if (irq_domain_get_of_node(domain->parent)) {
2220 fwspec.fwnode = domain->parent->fwnode;
2221 fwspec.param_count = 3;
2222 fwspec.param[0] = GIC_IRQ_TYPE_LPI;
2223 fwspec.param[1] = hwirq;
2224 fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
Tomasz Nowicki3f010cf2016-09-12 20:32:25 +02002225 } else if (is_fwnode_irqchip(domain->parent->fwnode)) {
2226 fwspec.fwnode = domain->parent->fwnode;
2227 fwspec.param_count = 2;
2228 fwspec.param[0] = hwirq;
2229 fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
Marc Zyngierf833f572015-10-13 12:51:33 +01002230 } else {
2231 return -EINVAL;
2232 }
Marc Zyngierb48ac832014-11-24 14:35:16 +00002233
Marc Zyngierf833f572015-10-13 12:51:33 +01002234 return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002235}
2236
2237static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
2238 unsigned int nr_irqs, void *args)
2239{
2240 msi_alloc_info_t *info = args;
2241 struct its_device *its_dev = info->scratchpad[0].ptr;
2242 irq_hw_number_t hwirq;
2243 int err;
2244 int i;
2245
2246 for (i = 0; i < nr_irqs; i++) {
2247 err = its_alloc_device_irq(its_dev, &hwirq);
2248 if (err)
2249 return err;
2250
2251 err = its_irq_gic_domain_alloc(domain, virq + i, hwirq);
2252 if (err)
2253 return err;
2254
2255 irq_domain_set_hwirq_and_chip(domain, virq + i,
2256 hwirq, &its_irq_chip, its_dev);
Marc Zyngier0d224d32017-08-18 09:39:18 +01002257 irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i)));
Marc Zyngierf1304202015-07-28 14:46:18 +01002258 pr_debug("ID:%d pID:%d vID:%d\n",
2259 (int)(hwirq - its_dev->event_map.lpi_base),
2260 (int) hwirq, virq + i);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002261 }
2262
2263 return 0;
2264}
2265
Thomas Gleixner72491642017-09-13 23:29:10 +02002266static int its_irq_domain_activate(struct irq_domain *domain,
2267 struct irq_data *d, bool early)
Marc Zyngieraca268d2014-12-12 10:51:23 +00002268{
2269 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2270 u32 event = its_get_event_id(d);
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +02002271 const struct cpumask *cpu_mask = cpu_online_mask;
Marc Zyngier0d224d32017-08-18 09:39:18 +01002272 int cpu;
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +02002273
2274 /* get the cpu_mask of local node */
2275 if (its_dev->its->numa_node >= 0)
2276 cpu_mask = cpumask_of_node(its_dev->its->numa_node);
Marc Zyngieraca268d2014-12-12 10:51:23 +00002277
Marc Zyngier591e5be2015-07-17 10:46:42 +01002278 /* Bind the LPI to the first possible CPU */
Marc Zyngier0d224d32017-08-18 09:39:18 +01002279 cpu = cpumask_first(cpu_mask);
2280 its_dev->event_map.col_map[event] = cpu;
2281 irq_data_update_effective_affinity(d, cpumask_of(cpu));
Marc Zyngier591e5be2015-07-17 10:46:42 +01002282
Marc Zyngieraca268d2014-12-12 10:51:23 +00002283 /* Map the GIC IRQ and event to the device */
Marc Zyngier6a25ad32016-12-20 15:52:26 +00002284 its_send_mapti(its_dev, d->hwirq, event);
Thomas Gleixner72491642017-09-13 23:29:10 +02002285 return 0;
Marc Zyngieraca268d2014-12-12 10:51:23 +00002286}
2287
2288static void its_irq_domain_deactivate(struct irq_domain *domain,
2289 struct irq_data *d)
2290{
2291 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2292 u32 event = its_get_event_id(d);
2293
2294 /* Stop the delivery of interrupts */
2295 its_send_discard(its_dev, event);
2296}
2297
Marc Zyngierb48ac832014-11-24 14:35:16 +00002298static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
2299 unsigned int nr_irqs)
2300{
2301 struct irq_data *d = irq_domain_get_irq_data(domain, virq);
2302 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2303 int i;
2304
2305 for (i = 0; i < nr_irqs; i++) {
2306 struct irq_data *data = irq_domain_get_irq_data(domain,
2307 virq + i);
Marc Zyngieraca268d2014-12-12 10:51:23 +00002308 u32 event = its_get_event_id(data);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002309
2310 /* Mark interrupt index as unused */
Marc Zyngier591e5be2015-07-17 10:46:42 +01002311 clear_bit(event, its_dev->event_map.lpi_map);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002312
2313 /* Nuke the entry in the domain */
Marc Zyngier2da39942014-12-12 10:51:22 +00002314 irq_domain_reset_irq_data(data);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002315 }
2316
2317 /* If all interrupts have been freed, start mopping the floor */
Marc Zyngier591e5be2015-07-17 10:46:42 +01002318 if (bitmap_empty(its_dev->event_map.lpi_map,
2319 its_dev->event_map.nr_lpis)) {
Marc Zyngiercf2be8b2016-12-19 18:49:59 +00002320 its_lpi_free_chunks(its_dev->event_map.lpi_map,
2321 its_dev->event_map.lpi_base,
2322 its_dev->event_map.nr_lpis);
2323 kfree(its_dev->event_map.col_map);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002324
2325 /* Unmap device/itt */
2326 its_send_mapd(its_dev, 0);
2327 its_free_device(its_dev);
2328 }
2329
2330 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
2331}
2332
2333static const struct irq_domain_ops its_domain_ops = {
2334 .alloc = its_irq_domain_alloc,
2335 .free = its_irq_domain_free,
Marc Zyngieraca268d2014-12-12 10:51:23 +00002336 .activate = its_irq_domain_activate,
2337 .deactivate = its_irq_domain_deactivate,
Marc Zyngierb48ac832014-11-24 14:35:16 +00002338};
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00002339
Marc Zyngier20b3d542016-12-20 15:23:22 +00002340/*
2341 * This is insane.
2342 *
2343 * If a GICv4 doesn't implement Direct LPIs (which is extremely
2344 * likely), the only way to perform an invalidate is to use a fake
2345 * device to issue an INV command, implying that the LPI has first
2346 * been mapped to some event on that device. Since this is not exactly
2347 * cheap, we try to keep that mapping around as long as possible, and
2348 * only issue an UNMAP if we're short on available slots.
2349 *
2350 * Broken by design(tm).
2351 */
2352static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
2353{
2354 /* Already unmapped? */
2355 if (vpe->vpe_proxy_event == -1)
2356 return;
2357
2358 its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event);
2359 vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL;
2360
2361 /*
2362 * We don't track empty slots at all, so let's move the
2363 * next_victim pointer if we can quickly reuse that slot
2364 * instead of nuking an existing entry. Not clear that this is
2365 * always a win though, and this might just generate a ripple
2366 * effect... Let's just hope VPEs don't migrate too often.
2367 */
2368 if (vpe_proxy.vpes[vpe_proxy.next_victim])
2369 vpe_proxy.next_victim = vpe->vpe_proxy_event;
2370
2371 vpe->vpe_proxy_event = -1;
2372}
2373
2374static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
2375{
2376 if (!gic_rdists->has_direct_lpi) {
2377 unsigned long flags;
2378
2379 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2380 its_vpe_db_proxy_unmap_locked(vpe);
2381 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2382 }
2383}
2384
2385static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
2386{
2387 /* Already mapped? */
2388 if (vpe->vpe_proxy_event != -1)
2389 return;
2390
2391 /* This slot was already allocated. Kick the other VPE out. */
2392 if (vpe_proxy.vpes[vpe_proxy.next_victim])
2393 its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]);
2394
2395 /* Map the new VPE instead */
2396 vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
2397 vpe->vpe_proxy_event = vpe_proxy.next_victim;
2398 vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites;
2399
2400 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx;
2401 its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event);
2402}
2403
Marc Zyngier958b90d2017-08-18 16:14:17 +01002404static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
2405{
2406 unsigned long flags;
2407 struct its_collection *target_col;
2408
2409 if (gic_rdists->has_direct_lpi) {
2410 void __iomem *rdbase;
2411
2412 rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base;
2413 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
2414 while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
2415 cpu_relax();
2416
2417 return;
2418 }
2419
2420 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2421
2422 its_vpe_db_proxy_map_locked(vpe);
2423
2424 target_col = &vpe_proxy.dev->its->collections[to];
2425 its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event);
2426 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to;
2427
2428 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2429}
2430
Marc Zyngier3171a472016-12-20 15:17:28 +00002431static int its_vpe_set_affinity(struct irq_data *d,
2432 const struct cpumask *mask_val,
2433 bool force)
2434{
2435 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2436 int cpu = cpumask_first(mask_val);
2437
2438 /*
2439 * Changing affinity is mega expensive, so let's be as lazy as
Marc Zyngier20b3d542016-12-20 15:23:22 +00002440 * we can and only do it if we really have to. Also, if mapped
Marc Zyngier958b90d2017-08-18 16:14:17 +01002441 * into the proxy device, we need to move the doorbell
2442 * interrupt to its new location.
Marc Zyngier3171a472016-12-20 15:17:28 +00002443 */
2444 if (vpe->col_idx != cpu) {
Marc Zyngier958b90d2017-08-18 16:14:17 +01002445 int from = vpe->col_idx;
2446
Marc Zyngier3171a472016-12-20 15:17:28 +00002447 vpe->col_idx = cpu;
2448 its_send_vmovp(vpe);
Marc Zyngier958b90d2017-08-18 16:14:17 +01002449 its_vpe_db_proxy_move(vpe, from, cpu);
Marc Zyngier3171a472016-12-20 15:17:28 +00002450 }
2451
2452 return IRQ_SET_MASK_OK_DONE;
2453}
2454
Marc Zyngiere643d802016-12-20 15:09:31 +00002455static void its_vpe_schedule(struct its_vpe *vpe)
2456{
2457	void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2458 u64 val;
2459
2460 /* Schedule the VPE */
2461 val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
2462 GENMASK_ULL(51, 12);
2463 val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
2464 val |= GICR_VPROPBASER_RaWb;
2465 val |= GICR_VPROPBASER_InnerShareable;
2466 gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2467
2468 val = virt_to_phys(page_address(vpe->vpt_page)) &
2469 GENMASK_ULL(51, 16);
2470 val |= GICR_VPENDBASER_RaWaWb;
2471 val |= GICR_VPENDBASER_NonShareable;
2472 /*
2473 * There is no good way of finding out if the pending table is
2474 * empty as we can race against the doorbell interrupt very
2475 * easily. So in the end, vpe->pending_last is only an
2476 * indication that the vcpu has something pending, not one
2477 * that the pending table is empty. A good implementation
2478 * would be able to read its coarse map pretty quickly anyway,
2479 * making this a tolerable issue.
2480 */
2481 val |= GICR_VPENDBASER_PendingLast;
2482 val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
2483 val |= GICR_VPENDBASER_Valid;
2484 gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
2485}
2486
2487static void its_vpe_deschedule(struct its_vpe *vpe)
2488{
2489	void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2490 u32 count = 1000000; /* 1s! */
2491 bool clean;
2492 u64 val;
2493
2494 /* We're being scheduled out */
2495 val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
2496 val &= ~GICR_VPENDBASER_Valid;
2497 gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
2498
2499 do {
2500 val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
2501 clean = !(val & GICR_VPENDBASER_Dirty);
2502 if (!clean) {
2503 count--;
2504 cpu_relax();
2505 udelay(1);
2506 }
2507 } while (!clean && count);
2508
2509 if (unlikely(!clean && !count)) {
2510 pr_err_ratelimited("ITS virtual pending table not cleaning\n");
2511 vpe->idai = false;
2512 vpe->pending_last = true;
2513 } else {
2514 vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
2515 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
2516 }
2517}
2518
Marc Zyngier40619a22017-10-08 15:16:09 +01002519static void its_vpe_invall(struct its_vpe *vpe)
2520{
2521 struct its_node *its;
2522
2523 list_for_each_entry(its, &its_nodes, entry) {
2524 if (!its->is_v4)
2525 continue;
2526
Marc Zyngier2247e1b2017-10-08 18:50:36 +01002527 if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr])
2528 continue;
2529
Marc Zyngier40619a22017-10-08 15:16:09 +01002530 its_send_vinvall(its, vpe);
2531 }
2532}
2533
Marc Zyngiere643d802016-12-20 15:09:31 +00002534static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
2535{
2536 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2537 struct its_cmd_info *info = vcpu_info;
2538
2539 switch (info->cmd_type) {
2540 case SCHEDULE_VPE:
2541 its_vpe_schedule(vpe);
2542 return 0;
2543
2544 case DESCHEDULE_VPE:
2545 its_vpe_deschedule(vpe);
2546 return 0;
2547
Marc Zyngier5e2f7642016-12-20 15:10:50 +00002548 case INVALL_VPE:
Marc Zyngier40619a22017-10-08 15:16:09 +01002549 its_vpe_invall(vpe);
Marc Zyngier5e2f7642016-12-20 15:10:50 +00002550 return 0;
2551
Marc Zyngiere643d802016-12-20 15:09:31 +00002552 default:
2553 return -EINVAL;
2554 }
2555}
2556
Marc Zyngier20b3d542016-12-20 15:23:22 +00002557static void its_vpe_send_cmd(struct its_vpe *vpe,
2558 void (*cmd)(struct its_device *, u32))
2559{
2560 unsigned long flags;
2561
2562 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2563
2564 its_vpe_db_proxy_map_locked(vpe);
2565 cmd(vpe_proxy.dev, vpe->vpe_proxy_event);
2566
2567 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2568}
2569
Marc Zyngierf6a91da2016-12-20 15:20:38 +00002570static void its_vpe_send_inv(struct irq_data *d)
2571{
2572 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
Marc Zyngierf6a91da2016-12-20 15:20:38 +00002573
Marc Zyngier20b3d542016-12-20 15:23:22 +00002574 if (gic_rdists->has_direct_lpi) {
2575 void __iomem *rdbase;
2576
2577 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
2578 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_INVLPIR);
2579 while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
2580 cpu_relax();
2581 } else {
2582 its_vpe_send_cmd(vpe, its_send_inv);
2583 }
Marc Zyngierf6a91da2016-12-20 15:20:38 +00002584}
2585
2586static void its_vpe_mask_irq(struct irq_data *d)
2587{
2588 /*
2589	 * We need to mask the LPI, which is described by the parent
2590	 * irq_data. Instead of calling into the parent (which won't
2591	 * exactly do the right thing), let's simply use the
2592	 * parent_data pointer. Yes, I'm naughty.
2593 */
2594 lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
2595 its_vpe_send_inv(d);
2596}
2597
2598static void its_vpe_unmask_irq(struct irq_data *d)
2599{
2600 /* Same hack as above... */
2601 lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
2602 its_vpe_send_inv(d);
2603}
2604
Marc Zyngiere57a3e282017-07-31 14:47:24 +01002605static int its_vpe_set_irqchip_state(struct irq_data *d,
2606 enum irqchip_irq_state which,
2607 bool state)
2608{
2609 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2610
2611 if (which != IRQCHIP_STATE_PENDING)
2612 return -EINVAL;
2613
2614 if (gic_rdists->has_direct_lpi) {
2615 void __iomem *rdbase;
2616
2617 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
2618 if (state) {
2619 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
2620 } else {
2621 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
2622 while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
2623 cpu_relax();
2624 }
2625 } else {
2626 if (state)
2627 its_vpe_send_cmd(vpe, its_send_int);
2628 else
2629 its_vpe_send_cmd(vpe, its_send_clear);
2630 }
2631
2632 return 0;
2633}
2634
Marc Zyngier8fff27a2016-12-20 13:41:55 +00002635static struct irq_chip its_vpe_irq_chip = {
2636 .name = "GICv4-vpe",
Marc Zyngierf6a91da2016-12-20 15:20:38 +00002637 .irq_mask = its_vpe_mask_irq,
2638 .irq_unmask = its_vpe_unmask_irq,
2639 .irq_eoi = irq_chip_eoi_parent,
Marc Zyngier3171a472016-12-20 15:17:28 +00002640 .irq_set_affinity = its_vpe_set_affinity,
Marc Zyngiere57a3e282017-07-31 14:47:24 +01002641 .irq_set_irqchip_state = its_vpe_set_irqchip_state,
Marc Zyngiere643d802016-12-20 15:09:31 +00002642 .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity,
Marc Zyngier8fff27a2016-12-20 13:41:55 +00002643};
2644
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00002645static int its_vpe_id_alloc(void)
2646{
2647 return ida_simple_get(&its_vpeid_ida, 0, 1 << 16, GFP_KERNEL);
2648}
2649
2650static void its_vpe_id_free(u16 id)
2651{
2652 ida_simple_remove(&its_vpeid_ida, id);
2653}
2654
2655static int its_vpe_init(struct its_vpe *vpe)
2656{
2657 struct page *vpt_page;
2658 int vpe_id;
2659
2660 /* Allocate vpe_id */
2661 vpe_id = its_vpe_id_alloc();
2662 if (vpe_id < 0)
2663 return vpe_id;
2664
2665 /* Allocate VPT */
2666 vpt_page = its_allocate_pending_table(GFP_KERNEL);
2667 if (!vpt_page) {
2668 its_vpe_id_free(vpe_id);
2669 return -ENOMEM;
2670 }
2671
2672 if (!its_alloc_vpe_table(vpe_id)) {
2673 its_vpe_id_free(vpe_id);
2674 its_free_pending_table(vpe->vpt_page);
2675 return -ENOMEM;
2676 }
2677
2678 vpe->vpe_id = vpe_id;
2679 vpe->vpt_page = vpt_page;
Marc Zyngier20b3d542016-12-20 15:23:22 +00002680 vpe->vpe_proxy_event = -1;
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00002681
2682 return 0;
2683}
2684
2685static void its_vpe_teardown(struct its_vpe *vpe)
2686{
Marc Zyngier20b3d542016-12-20 15:23:22 +00002687 its_vpe_db_proxy_unmap(vpe);
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00002688 its_vpe_id_free(vpe->vpe_id);
2689 its_free_pending_table(vpe->vpt_page);
2690}
2691
2692static void its_vpe_irq_domain_free(struct irq_domain *domain,
2693 unsigned int virq,
2694 unsigned int nr_irqs)
2695{
2696 struct its_vm *vm = domain->host_data;
2697 int i;
2698
2699 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
2700
2701 for (i = 0; i < nr_irqs; i++) {
2702 struct irq_data *data = irq_domain_get_irq_data(domain,
2703 virq + i);
2704 struct its_vpe *vpe = irq_data_get_irq_chip_data(data);
2705
2706 BUG_ON(vm != vpe->its_vm);
2707
2708 clear_bit(data->hwirq, vm->db_bitmap);
2709 its_vpe_teardown(vpe);
2710 irq_domain_reset_irq_data(data);
2711 }
2712
2713 if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) {
2714 its_lpi_free_chunks(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
2715 its_free_prop_table(vm->vprop_page);
2716 }
2717}
2718
2719static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
2720 unsigned int nr_irqs, void *args)
2721{
2722 struct its_vm *vm = args;
2723 unsigned long *bitmap;
2724 struct page *vprop_page;
2725 int base, nr_ids, i, err = 0;
2726
2727 BUG_ON(!vm);
2728
2729 bitmap = its_lpi_alloc_chunks(nr_irqs, &base, &nr_ids);
2730 if (!bitmap)
2731 return -ENOMEM;
2732
2733 if (nr_ids < nr_irqs) {
2734 its_lpi_free_chunks(bitmap, base, nr_ids);
2735 return -ENOMEM;
2736 }
2737
2738 vprop_page = its_allocate_prop_table(GFP_KERNEL);
2739 if (!vprop_page) {
2740 its_lpi_free_chunks(bitmap, base, nr_ids);
2741 return -ENOMEM;
2742 }
2743
2744 vm->db_bitmap = bitmap;
2745 vm->db_lpi_base = base;
2746 vm->nr_db_lpis = nr_ids;
2747 vm->vprop_page = vprop_page;
2748
2749 for (i = 0; i < nr_irqs; i++) {
2750 vm->vpes[i]->vpe_db_lpi = base + i;
2751 err = its_vpe_init(vm->vpes[i]);
2752 if (err)
2753 break;
2754 err = its_irq_gic_domain_alloc(domain, virq + i,
2755 vm->vpes[i]->vpe_db_lpi);
2756 if (err)
2757 break;
2758 irq_domain_set_hwirq_and_chip(domain, virq + i, i,
2759 &its_vpe_irq_chip, vm->vpes[i]);
2760 set_bit(i, bitmap);
2761 }
2762
2763 if (err) {
2764 if (i > 0)
2765 its_vpe_irq_domain_free(domain, virq, i - 1);
2766
2767 its_lpi_free_chunks(bitmap, base, nr_ids);
2768 its_free_prop_table(vprop_page);
2769 }
2770
2771 return err;
2772}
2773
Thomas Gleixner72491642017-09-13 23:29:10 +02002774static int its_vpe_irq_domain_activate(struct irq_domain *domain,
2775 struct irq_data *d, bool early)
Marc Zyngiereb781922016-12-20 14:47:05 +00002776{
2777 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
Marc Zyngier40619a22017-10-08 15:16:09 +01002778 struct its_node *its;
Marc Zyngiereb781922016-12-20 14:47:05 +00002779
Marc Zyngier2247e1b2017-10-08 18:50:36 +01002780 /* If we use the list map, we issue VMAPP on demand... */
2781 if (its_list_map)
2782		return 0;
2783
Marc Zyngiereb781922016-12-20 14:47:05 +00002784 /* Map the VPE to the first possible CPU */
2785 vpe->col_idx = cpumask_first(cpu_online_mask);
Marc Zyngier40619a22017-10-08 15:16:09 +01002786
2787 list_for_each_entry(its, &its_nodes, entry) {
2788 if (!its->is_v4)
2789 continue;
2790
Marc Zyngier75fd9512017-10-08 18:46:39 +01002791 its_send_vmapp(its, vpe, true);
Marc Zyngier40619a22017-10-08 15:16:09 +01002792 its_send_vinvall(its, vpe);
2793 }
2794
Thomas Gleixner72491642017-09-13 23:29:10 +02002795 return 0;
Marc Zyngiereb781922016-12-20 14:47:05 +00002796}
2797
2798static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
2799 struct irq_data *d)
2800{
2801 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
Marc Zyngier75fd9512017-10-08 18:46:39 +01002802 struct its_node *its;
Marc Zyngiereb781922016-12-20 14:47:05 +00002803
Marc Zyngier2247e1b2017-10-08 18:50:36 +01002804 /*
2805 * If we use the list map, we unmap the VPE once no VLPIs are
2806 * associated with the VM.
2807 */
2808 if (its_list_map)
2809 return;
2810
Marc Zyngier75fd9512017-10-08 18:46:39 +01002811 list_for_each_entry(its, &its_nodes, entry) {
2812 if (!its->is_v4)
2813 continue;
2814
2815 its_send_vmapp(its, vpe, false);
2816 }
Marc Zyngiereb781922016-12-20 14:47:05 +00002817}
2818
Marc Zyngier8fff27a2016-12-20 13:41:55 +00002819static const struct irq_domain_ops its_vpe_domain_ops = {
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00002820 .alloc = its_vpe_irq_domain_alloc,
2821 .free = its_vpe_irq_domain_free,
Marc Zyngiereb781922016-12-20 14:47:05 +00002822 .activate = its_vpe_irq_domain_activate,
2823 .deactivate = its_vpe_irq_domain_deactivate,
Marc Zyngier8fff27a2016-12-20 13:41:55 +00002824};
2825
Yun Wu4559fbb2015-03-06 16:37:50 +00002826static int its_force_quiescent(void __iomem *base)
2827{
2828 u32 count = 1000000; /* 1s */
2829 u32 val;
2830
2831 val = readl_relaxed(base + GITS_CTLR);
David Daney7611da82016-08-18 15:41:58 -07002832 /*
2833 * GIC architecture specification requires the ITS to be both
2834 * disabled and quiescent for writes to GITS_BASER<n> or
2835 * GITS_CBASER to not have UNPREDICTABLE results.
2836 */
2837 if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE))
Yun Wu4559fbb2015-03-06 16:37:50 +00002838 return 0;
2839
2840 /* Disable the generation of all interrupts to this ITS */
Marc Zyngierd51c4b42017-06-27 21:24:25 +01002841 val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe);
Yun Wu4559fbb2015-03-06 16:37:50 +00002842 writel_relaxed(val, base + GITS_CTLR);
2843
2844 /* Poll GITS_CTLR and wait until ITS becomes quiescent */
2845 while (1) {
2846 val = readl_relaxed(base + GITS_CTLR);
2847 if (val & GITS_CTLR_QUIESCENT)
2848 return 0;
2849
2850 count--;
2851 if (!count)
2852 return -EBUSY;
2853
2854 cpu_relax();
2855 udelay(1);
2856 }
2857}
2858
Ard Biesheuvel9d111d42017-10-17 17:55:55 +01002859static bool __maybe_unused its_enable_quirk_cavium_22375(void *data)
Robert Richter94100972015-09-21 22:58:38 +02002860{
2861 struct its_node *its = data;
2862
Ard Biesheuvelfa150012017-10-17 17:55:54 +01002863 /* erratum 22375: only alloc 8MB table size */
2864 its->device_ids = 0x14; /* 20 bits, 8MB */
Robert Richter94100972015-09-21 22:58:38 +02002865 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
Ard Biesheuvel9d111d42017-10-17 17:55:55 +01002866
2867 return true;
Robert Richter94100972015-09-21 22:58:38 +02002868}
2869
Ard Biesheuvel9d111d42017-10-17 17:55:55 +01002870static bool __maybe_unused its_enable_quirk_cavium_23144(void *data)
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +02002871{
2872 struct its_node *its = data;
2873
2874 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
Ard Biesheuvel9d111d42017-10-17 17:55:55 +01002875
2876 return true;
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +02002877}
2878
Ard Biesheuvel9d111d42017-10-17 17:55:55 +01002879static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
Shanker Donthineni90922a22017-03-07 08:20:38 -06002880{
2881 struct its_node *its = data;
2882
2883 /* On QDF2400, the size of the ITE is 16Bytes */
2884 its->ite_size = 16;
Ard Biesheuvel9d111d42017-10-17 17:55:55 +01002885
2886 return true;
Shanker Donthineni90922a22017-03-07 08:20:38 -06002887}
2888
Ard Biesheuvel558b0162017-10-17 17:55:56 +01002889static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev)
2890{
2891 struct its_node *its = its_dev->its;
2892
2893 /*
2894 * The Socionext Synquacer SoC has a so-called 'pre-ITS',
2895 * which maps 32-bit writes targeted at a separate window of
2896 * size '4 << device_id_bits' onto writes to GITS_TRANSLATER
2897 * with device ID taken from bits [device_id_bits + 1:2] of
2898 * the window offset.
2899 */
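	/*
	 * For example, a 32-bit write at window offset 0x40 reaches
	 * GITS_TRANSLATER on behalf of device ID 0x10 (0x40 >> 2).
	 */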
2900 return its->pre_its_base + (its_dev->device_id << 2);
2901}
2902
2903static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data)
2904{
2905 struct its_node *its = data;
2906 u32 pre_its_window[2];
2907 u32 ids;
2908
2909 if (!fwnode_property_read_u32_array(its->fwnode_handle,
2910 "socionext,synquacer-pre-its",
2911 pre_its_window,
2912 ARRAY_SIZE(pre_its_window))) {
2913
2914 its->pre_its_base = pre_its_window[0];
2915 its->get_msi_base = its_irq_get_msi_base_pre_its;
2916
2917 ids = ilog2(pre_its_window[1]) - 2;
2918 if (its->device_ids > ids)
2919 its->device_ids = ids;
2920
2921 /* the pre-ITS breaks isolation, so disable MSI remapping */
2922 its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP;
2923 return true;
2924 }
2925 return false;
2926}
2927
Marc Zyngier5c9a8822017-07-28 21:20:37 +01002928static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data)
2929{
2930 struct its_node *its = data;
2931
2932 /*
2933 * Hip07 insists on using the wrong address for the VLPI
2934 * page. Trick it into doing the right thing...
2935 */
2936 its->vlpi_redist_offset = SZ_128K;
2937 return true;
2938}
2939
Robert Richter67510cc2015-09-21 22:58:37 +02002940static const struct gic_quirk its_quirks[] = {
Robert Richter94100972015-09-21 22:58:38 +02002941#ifdef CONFIG_CAVIUM_ERRATUM_22375
2942 {
2943 .desc = "ITS: Cavium errata 22375, 24313",
2944 .iidr = 0xa100034c, /* ThunderX pass 1.x */
2945 .mask = 0xffff0fff,
2946 .init = its_enable_quirk_cavium_22375,
2947 },
2948#endif
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +02002949#ifdef CONFIG_CAVIUM_ERRATUM_23144
2950 {
2951 .desc = "ITS: Cavium erratum 23144",
2952 .iidr = 0xa100034c, /* ThunderX pass 1.x */
2953 .mask = 0xffff0fff,
2954 .init = its_enable_quirk_cavium_23144,
2955 },
2956#endif
Shanker Donthineni90922a22017-03-07 08:20:38 -06002957#ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065
2958 {
2959 .desc = "ITS: QDF2400 erratum 0065",
2960 .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */
2961 .mask = 0xffffffff,
2962 .init = its_enable_quirk_qdf2400_e0065,
2963 },
2964#endif
Ard Biesheuvel558b0162017-10-17 17:55:56 +01002965#ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS
2966 {
2967 /*
2968 * The Socionext Synquacer SoC incorporates ARM's own GIC-500
2969 * implementation, but with a 'pre-ITS' added that requires
2970 * special handling in software.
2971 */
2972 .desc = "ITS: Socionext Synquacer pre-ITS",
2973 .iidr = 0x0001143b,
2974 .mask = 0xffffffff,
2975 .init = its_enable_quirk_socionext_synquacer,
2976 },
2977#endif
Marc Zyngier5c9a8822017-07-28 21:20:37 +01002978#ifdef CONFIG_HISILICON_ERRATUM_161600802
2979 {
2980 .desc = "ITS: Hip07 erratum 161600802",
2981 .iidr = 0x00000004,
2982 .mask = 0xffffffff,
2983 .init = its_enable_quirk_hip07_161600802,
2984 },
2985#endif
Robert Richter67510cc2015-09-21 22:58:37 +02002986 {
2987 }
2988};
2989
2990static void its_enable_quirks(struct its_node *its)
2991{
2992 u32 iidr = readl_relaxed(its->base + GITS_IIDR);
2993
2994 gic_enable_quirks(iidr, its_quirks, its);
2995}
2996
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02002997static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
Tomasz Nowickid14ae5e2016-09-12 20:32:23 +02002998{
2999 struct irq_domain *inner_domain;
3000 struct msi_domain_info *info;
3001
3002 info = kzalloc(sizeof(*info), GFP_KERNEL);
3003 if (!info)
3004 return -ENOMEM;
3005
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003006 inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its);
Tomasz Nowickid14ae5e2016-09-12 20:32:23 +02003007 if (!inner_domain) {
3008 kfree(info);
3009 return -ENOMEM;
3010 }
3011
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003012 inner_domain->parent = its_parent;
Marc Zyngier96f0d932017-06-22 11:42:50 +01003013 irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
Ard Biesheuvel558b0162017-10-17 17:55:56 +01003014 inner_domain->flags |= its->msi_domain_flags;
Tomasz Nowickid14ae5e2016-09-12 20:32:23 +02003015 info->ops = &its_msi_domain_ops;
3016 info->data = its;
3017 inner_domain->host_data = info;
3018
3019 return 0;
3020}
3021
static int its_init_vpe_domain(void)
{
	struct its_node *its;
	u32 devid;
	int entries;

	if (gic_rdists->has_direct_lpi) {
		pr_info("ITS: Using DirectLPI for VPE invalidation\n");
		return 0;
	}

	/* Any ITS will do, even if not v4 */
	its = list_first_entry(&its_nodes, struct its_node, entry);

	entries = roundup_pow_of_two(nr_cpu_ids);
	vpe_proxy.vpes = kzalloc(sizeof(*vpe_proxy.vpes) * entries,
				 GFP_KERNEL);
	if (!vpe_proxy.vpes) {
		pr_err("ITS: Can't allocate GICv4 proxy device array\n");
		return -ENOMEM;
	}

	/* Use the last possible DevID */
	devid = GENMASK(its->device_ids - 1, 0);
	vpe_proxy.dev = its_create_device(its, devid, entries, false);
	if (!vpe_proxy.dev) {
		kfree(vpe_proxy.vpes);
		pr_err("ITS: Can't allocate GICv4 proxy device\n");
		return -ENOMEM;
	}

	BUG_ON(entries != vpe_proxy.dev->nr_ites);

	raw_spin_lock_init(&vpe_proxy.lock);
	vpe_proxy.next_victim = 0;
	pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n",
		devid, vpe_proxy.dev->nr_ites);

	return 0;
}

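/*
 * GICv4 ITSs that cannot broadcast VMOVP need a slot in the global
 * ITSList. Claim the first free slot, program it into GITS_CTLR, read
 * it back in case the field turned out to be fixed by the
 * implementation, and reject duplicates.
 */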
static int __init its_compute_its_list_map(struct resource *res,
					   void __iomem *its_base)
{
	int its_number;
	u32 ctlr;

	/*
	 * This is assumed to run early enough that we are guaranteed
	 * to be single-threaded, hence no locking. If that ever
	 * changes, locking will have to be added.
	 */
	its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX);
	if (its_number >= GICv4_ITS_LIST_MAX) {
		pr_err("ITS@%pa: No ITSList entry available!\n",
		       &res->start);
		return -EINVAL;
	}

	ctlr = readl_relaxed(its_base + GITS_CTLR);
	ctlr &= ~GITS_CTLR_ITS_NUMBER;
	ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT;
	writel_relaxed(ctlr, its_base + GITS_CTLR);
	ctlr = readl_relaxed(its_base + GITS_CTLR);
	if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) {
		its_number = ctlr & GITS_CTLR_ITS_NUMBER;
		its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT;
	}

	if (test_and_set_bit(its_number, &its_list_map)) {
		pr_err("ITS@%pa: Duplicate ITSList entry %d\n",
		       &res->start, its_number);
		return -EINVAL;
	}

	return its_number;
}

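/*
 * Probe a single ITS instance: map and sanity-check its registers,
 * quiesce it, apply quirks, allocate the command queue, the device and
 * collection tables and (for v4 without VMOVP broadcast) an ITSList
 * slot, program GITS_CBASER/GITS_CWRITER, enable the ITS, and finally
 * register its MSI domain and add it to the global its_nodes list.
 */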
static int __init its_probe_one(struct resource *res,
				struct fwnode_handle *handle, int numa_node)
{
	struct its_node *its;
	void __iomem *its_base;
	u32 val, ctlr;
	u64 baser, tmp, typer;
	int err;

	its_base = ioremap(res->start, resource_size(res));
	if (!its_base) {
		pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
		return -ENOMEM;
	}

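	/*
	 * GIC_PIDR2 encodes the architecture revision: 0x3x for GICv3,
	 * 0x4x for GICv4. Anything else is not an ITS we know how to
	 * drive.
	 */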
	val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
	if (val != 0x30 && val != 0x40) {
		pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start);
		err = -ENODEV;
		goto out_unmap;
	}

	err = its_force_quiescent(its_base);
	if (err) {
		pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start);
		goto out_unmap;
	}

	pr_info("ITS %pR\n", res);

	its = kzalloc(sizeof(*its), GFP_KERNEL);
	if (!its) {
		err = -ENOMEM;
		goto out_unmap;
	}

	raw_spin_lock_init(&its->lock);
	INIT_LIST_HEAD(&its->entry);
	INIT_LIST_HEAD(&its->its_device_list);
	typer = gic_read_typer(its_base + GITS_TYPER);
	its->base = its_base;
	its->phys_base = res->start;
	its->ite_size = GITS_TYPER_ITT_ENTRY_SIZE(typer);
	its->device_ids = GITS_TYPER_DEVBITS(typer);
	its->is_v4 = !!(typer & GITS_TYPER_VLPIS);
	if (its->is_v4) {
		if (!(typer & GITS_TYPER_VMOVP)) {
			err = its_compute_its_list_map(res, its_base);
			if (err < 0)
				goto out_free_its;

			its->list_nr = err;

			pr_info("ITS@%pa: Using ITS number %d\n",
				&res->start, err);
		} else {
			pr_info("ITS@%pa: Single VMOVP capable\n", &res->start);
		}
	}

	its->numa_node = numa_node;

	its->cmd_base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						 get_order(ITS_CMD_QUEUE_SZ));
	if (!its->cmd_base) {
		err = -ENOMEM;
		goto out_free_its;
	}
	its->cmd_write = its->cmd_base;
	its->fwnode_handle = handle;
	its->get_msi_base = its_irq_get_msi_base;
	its->msi_domain_flags = IRQ_DOMAIN_FLAG_MSI_REMAP;

	its_enable_quirks(its);

	err = its_alloc_tables(its);
	if (err)
		goto out_free_cmd;

	err = its_alloc_collections(its);
	if (err)
		goto out_free_tables;

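	/*
	 * Point GITS_CBASER at the command queue: write-back
	 * read/write-allocate, inner-shareable, sized in 4K pages. If
	 * the ITS strips the shareability attribute, downgrade to
	 * non-cacheable and flush the queue by hand from then on.
	 */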
	baser = (virt_to_phys(its->cmd_base)	|
		 GITS_CBASER_RaWaWb		|
		 GITS_CBASER_InnerShareable	|
		 (ITS_CMD_QUEUE_SZ / SZ_4K - 1)	|
		 GITS_CBASER_VALID);

	gits_write_cbaser(baser, its->base + GITS_CBASER);
	tmp = gits_read_cbaser(its->base + GITS_CBASER);

	if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
		if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
			/*
			 * The HW reports non-shareable, we must
			 * remove the cacheability attributes as
			 * well.
			 */
			baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
				   GITS_CBASER_CACHEABILITY_MASK);
			baser |= GITS_CBASER_nC;
			gits_write_cbaser(baser, its->base + GITS_CBASER);
		}
		pr_info("ITS: using cache flushing for cmd queue\n");
		its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
	}

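	/* Zero the write pointer and enable the ITS (plus ImDe on v4) */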
	gits_write_cwriter(0, its->base + GITS_CWRITER);
	ctlr = readl_relaxed(its->base + GITS_CTLR);
	ctlr |= GITS_CTLR_ENABLE;
	if (its->is_v4)
		ctlr |= GITS_CTLR_ImDe;
	writel_relaxed(ctlr, its->base + GITS_CTLR);

	err = its_init_domain(handle, its);
	if (err)
		goto out_free_tables;

	spin_lock(&its_lock);
	list_add(&its->entry, &its_nodes);
	spin_unlock(&its_lock);

	return 0;

out_free_tables:
	its_free_tables(its);
out_free_cmd:
	free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
out_free_its:
	kfree(its);
out_unmap:
	iounmap(its_base);
	pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err);
	return err;
}

static bool gic_rdists_supports_plpis(void)
{
	return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
}

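/*
 * Per-CPU LPI setup: if any ITS was probed, check that this CPU's
 * redistributor supports physical LPIs, then initialise its LPI
 * tables and collection mapping.
 */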
int its_cpu_init(void)
{
	if (!list_empty(&its_nodes)) {
		if (!gic_rdists_supports_plpis()) {
			pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
			return -ENXIO;
		}
		its_cpu_init_lpis();
		its_cpu_init_collection();
	}

	return 0;
}

static const struct of_device_id its_device_id[] = {
	{	.compatible	= "arm,gic-v3-its",	},
	{},
};

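/*
 * Walk the device tree below the GICv3 node and probe every
 * "arm,gic-v3-its" child that is flagged as an msi-controller and has
 * a usable reg property.
 */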
static int __init its_of_probe(struct device_node *node)
{
	struct device_node *np;
	struct resource res;

	for (np = of_find_matching_node(node, its_device_id); np;
	     np = of_find_matching_node(np, its_device_id)) {
		if (!of_property_read_bool(np, "msi-controller")) {
			pr_warn("%pOF: no msi-controller property, ITS ignored\n",
				np);
			continue;
		}

		if (of_address_to_resource(np, 0, &res)) {
			pr_warn("%pOF: no regs?\n", np);
			continue;
		}

		its_probe_one(&res, &np->fwnode, of_node_to_nid(np));
	}
	return 0;
}

#ifdef CONFIG_ACPI

#define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K)

#ifdef CONFIG_ACPI_NUMA
struct its_srat_map {
	/* numa node id */
	u32	numa_node;
	/* GIC ITS ID */
	u32	its_id;
};

static struct its_srat_map *its_srat_maps __initdata;
static int its_in_srat __initdata;

static int __init acpi_get_its_numa_node(u32 its_id)
{
	int i;

	for (i = 0; i < its_in_srat; i++) {
		if (its_id == its_srat_maps[i].its_id)
			return its_srat_maps[i].numa_node;
	}
	return NUMA_NO_NODE;
}

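/*
 * Per-entry callback for the first SRAT pass: nothing to do here, the
 * pass is only run so that acpi_table_parse_entries() reports how many
 * GIC ITS affinity entries exist.
 */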
static int __init gic_acpi_match_srat_its(struct acpi_subtable_header *header,
					  const unsigned long end)
{
	return 0;
}

static int __init gic_acpi_parse_srat_its(struct acpi_subtable_header *header,
					  const unsigned long end)
{
	int node;
	struct acpi_srat_gic_its_affinity *its_affinity;

	its_affinity = (struct acpi_srat_gic_its_affinity *)header;
	if (!its_affinity)
		return -EINVAL;

	if (its_affinity->header.length < sizeof(*its_affinity)) {
		pr_err("SRAT: Invalid header length %d in ITS affinity\n",
			its_affinity->header.length);
		return -EINVAL;
	}

	node = acpi_map_pxm_to_node(its_affinity->proximity_domain);

	if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
		pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node);
		return 0;
	}

	its_srat_maps[its_in_srat].numa_node = node;
	its_srat_maps[its_in_srat].its_id = its_affinity->its_id;
	its_in_srat++;
	pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n",
		its_affinity->proximity_domain, its_affinity->its_id, node);

	return 0;
}

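/*
 * Parse the SRAT GIC ITS affinity entries in two passes: first count
 * them so the its_srat_maps array can be sized, then fill in the
 * ITS-ID-to-NUMA-node mapping used later by acpi_get_its_numa_node().
 */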
static void __init acpi_table_parse_srat_its(void)
{
	int count;

	count = acpi_table_parse_entries(ACPI_SIG_SRAT,
			sizeof(struct acpi_table_srat),
			ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
			gic_acpi_match_srat_its, 0);
	if (count <= 0)
		return;

	its_srat_maps = kmalloc(count * sizeof(struct its_srat_map),
				GFP_KERNEL);
	if (!its_srat_maps) {
		pr_warn("SRAT: Failed to allocate memory for its_srat_maps!\n");
		return;
	}

	acpi_table_parse_entries(ACPI_SIG_SRAT,
			sizeof(struct acpi_table_srat),
			ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
			gic_acpi_parse_srat_its, 0);
}

/* free the its_srat_maps after ITS probing */
static void __init acpi_its_srat_maps_free(void)
{
	kfree(its_srat_maps);
}
#else
static void __init acpi_table_parse_srat_its(void) { }
static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; }
static void __init acpi_its_srat_maps_free(void) { }
#endif

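/*
 * ACPI probing: each MADT generic translator entry describes one ITS
 * frame. Build a resource from its base address, allocate an irqdomain
 * fwnode token, register that token with IORT so devices can be routed
 * to this ITS, then probe it just like the DT path does.
 */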
static int __init gic_acpi_parse_madt_its(struct acpi_subtable_header *header,
					  const unsigned long end)
{
	struct acpi_madt_generic_translator *its_entry;
	struct fwnode_handle *dom_handle;
	struct resource res;
	int err;

	its_entry = (struct acpi_madt_generic_translator *)header;
	memset(&res, 0, sizeof(res));
	res.start = its_entry->base_address;
	res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1;
	res.flags = IORESOURCE_MEM;

	dom_handle = irq_domain_alloc_fwnode((void *)its_entry->base_address);
	if (!dom_handle) {
		pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n",
		       &res.start);
		return -ENOMEM;
	}

	err = iort_register_domain_token(its_entry->translation_id, dom_handle);
	if (err) {
		pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n",
		       &res.start, its_entry->translation_id);
		goto dom_err;
	}

	err = its_probe_one(&res, dom_handle,
			    acpi_get_its_numa_node(its_entry->translation_id));
	if (!err)
		return 0;

	iort_deregister_domain_token(its_entry->translation_id);
dom_err:
	irq_domain_free_fwnode(dom_handle);
	return err;
}

static void __init its_acpi_probe(void)
{
	acpi_table_parse_srat_its();
	acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
			      gic_acpi_parse_madt_its, 0);
	acpi_its_srat_maps_free();
}
#else
static void __init its_acpi_probe(void) { }
#endif

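/*
 * Main entry point, called by the GICv3 driver: probe every ITS found
 * via DT or ACPI, allocate the global LPI tables, and, if at least one
 * ITS and the redistributors are GICv4 capable, bring up the VPE
 * infrastructure (falling back to plain GICv3 behaviour on failure).
 */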
int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
		    struct irq_domain *parent_domain)
{
	struct device_node *of_node;
	struct its_node *its;
	bool has_v4 = false;
	int err;

	its_parent = parent_domain;
	of_node = to_of_node(handle);
	if (of_node)
		its_of_probe(of_node);
	else
		its_acpi_probe();

	if (list_empty(&its_nodes)) {
		pr_warn("ITS: No ITS available, not enabling LPIs\n");
		return -ENXIO;
	}

	gic_rdists = rdists;
	err = its_alloc_lpi_tables();
	if (err)
		return err;

	list_for_each_entry(its, &its_nodes, entry)
		has_v4 |= its->is_v4;

	if (has_v4 & rdists->has_vlpis) {
		if (its_init_vpe_domain() ||
		    its_init_v4(parent_domain, &its_vpe_domain_ops)) {
			rdists->has_vlpis = false;
			pr_err("ITS: Disabling GICv4 support\n");
		}
	}

	return 0;
}