/*
 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitmap.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/percpu.h>
#include <linux/slab.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irqchip/arm-gic-v4.h>

#include <asm/cputype.h>
#include <asm/exception.h>

#include "irq-gic-common.h"

#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING		(1ULL << 0)
#define ITS_FLAGS_WORKAROUND_CAVIUM_22375	(1ULL << 1)
#define ITS_FLAGS_WORKAROUND_CAVIUM_23144	(1ULL << 2)

#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING	(1 << 0)

static u32 lpi_id_bits;

/*
 * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
 * deal with (one configuration byte per interrupt). PENDBASE has to
 * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
 */
#define LPI_NRBITS		lpi_id_bits
#define LPI_PROPBASE_SZ		ALIGN(BIT(LPI_NRBITS), SZ_64K)
#define LPI_PENDBASE_SZ		ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
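/*
 * Example: with lpi_id_bits = 16, LPI_PROPBASE_SZ works out to 64kB
 * (one configuration byte per LPI) and LPI_PENDBASE_SZ to 64kB as
 * well (8kB of pending bits, rounded up to the 64kB alignment).
 */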

#define LPI_PROP_DEFAULT_PRIO	0xa0

/*
 * Collection structure - just an ID, and a redistributor address to
 * ping. We use one per CPU as a bag of interrupts assigned to this
 * CPU.
 */
struct its_collection {
	u64			target_address;
	u16			col_id;
};

/*
 * The ITS_BASER structure - contains memory information, cached
 * value of BASER register configuration and ITS page size.
 */
struct its_baser {
	void		*base;
	u64		val;
	u32		order;
	u32		psz;
};

/*
 * The ITS structure - contains most of the infrastructure, with the
 * top-level MSI domain, the command queue, the collections, and the
 * list of devices writing to it.
 */
struct its_node {
	raw_spinlock_t		lock;
	struct list_head	entry;
	void __iomem		*base;
	phys_addr_t		phys_base;
	struct its_cmd_block	*cmd_base;
	struct its_cmd_block	*cmd_write;
	struct its_baser	tables[GITS_BASER_NR_REGS];
	struct its_collection	*collections;
	struct list_head	its_device_list;
	u64			flags;
	u32			ite_size;
	u32			device_ids;
	int			numa_node;
	bool			is_v4;
};

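/*
 * MAPD's ITT address field starts at bit 8 of the command (see
 * its_encode_itt() below), hence the 256 byte alignment requirement.
 */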
#define ITS_ITT_ALIGN		SZ_256

/* The maximum number of VPEID bits supported by VLPI commands */
#define ITS_MAX_VPEID_BITS	(16)
#define ITS_MAX_VPEID		(1 << (ITS_MAX_VPEID_BITS))

/* Convert page order to size in bytes */
#define PAGE_ORDER_TO_SIZE(o)	(PAGE_SIZE << (o))
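/* e.g. with 4kB pages, PAGE_ORDER_TO_SIZE(4) = 4kB << 4 = 64kB */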

struct event_lpi_map {
	unsigned long		*lpi_map;
	u16			*col_map;
	irq_hw_number_t		lpi_base;
	int			nr_lpis;
	struct mutex		vlpi_lock;
	struct its_vm		*vm;
	struct its_vlpi_map	*vlpi_maps;
	int			nr_vlpis;
};

/*
 * The ITS view of a device - belongs to an ITS, owns an interrupt
 * translation table, and a list of interrupts. If some of its
 * LPIs are injected into a guest (GICv4), the event_map.vm field
 * indicates which one.
 */
struct its_device {
	struct list_head	entry;
	struct its_node		*its;
	struct event_lpi_map	event_map;
	void			*itt;
	u32			nr_ites;
	u32			device_id;
};

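/*
 * Proxy ITS device used for doorbell operations on vPEs when the
 * redistributors lack DirectLPI. A single device is shared by the
 * whole system, with next_victim providing round-robin reuse of its
 * event slots.
 */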
static struct {
	raw_spinlock_t		lock;
	struct its_device	*dev;
	struct its_vpe		**vpes;
	int			next_victim;
} vpe_proxy;

static LIST_HEAD(its_nodes);
static DEFINE_SPINLOCK(its_lock);
static struct rdists *gic_rdists;
static struct irq_domain *its_parent;

/*
 * We have a maximum number of 16 ITSs in the whole system if we're
 * using the ITSList mechanism
 */
#define ITS_LIST_MAX		16

static unsigned long its_list_map;
static u16 vmovp_seq_num;
static DEFINE_RAW_SPINLOCK(vmovp_lock);

static DEFINE_IDA(its_vpeid_ida);

#define gic_data_rdist()		(raw_cpu_ptr(gic_rdists->rdist))
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
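/* The GICv4 VLPI frame sits two 64kB pages above RD_base */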
#define gic_data_rdist_vlpi_base()	(gic_data_rdist_rd_base() + SZ_128K)

static struct its_collection *dev_event_to_col(struct its_device *its_dev,
					       u32 event)
{
	struct its_node *its = its_dev->its;

	return its->collections + its_dev->event_map.col_map[event];
}

/*
 * ITS command descriptors - parameters to be encoded in a command
 * block.
 */
struct its_cmd_desc {
	union {
		struct {
			struct its_device *dev;
			u32 event_id;
		} its_inv_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_clear_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_int_cmd;

		struct {
			struct its_device *dev;
			int valid;
		} its_mapd_cmd;

		struct {
			struct its_collection *col;
			int valid;
		} its_mapc_cmd;

		struct {
			struct its_device *dev;
			u32 phys_id;
			u32 event_id;
		} its_mapti_cmd;

		struct {
			struct its_device *dev;
			struct its_collection *col;
			u32 event_id;
		} its_movi_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_discard_cmd;

		struct {
			struct its_collection *col;
		} its_invall_cmd;

		struct {
			struct its_vpe *vpe;
		} its_vinvall_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_collection *col;
			bool valid;
		} its_vmapp_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_device *dev;
			u32 virt_id;
			u32 event_id;
			bool db_enabled;
		} its_vmapti_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_device *dev;
			u32 event_id;
			bool db_enabled;
		} its_vmovi_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_collection *col;
			u16 seq_num;
			u16 its_list;
		} its_vmovp_cmd;
	};
};

/*
 * The ITS command block, which is what the ITS actually parses.
 */
struct its_cmd_block {
	u64	raw_cmd[4];
};

#define ITS_CMD_QUEUE_SZ		SZ_64K
#define ITS_CMD_QUEUE_NR_ENTRIES	(ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))
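/* With 32-byte commands, the 64kB queue holds 2048 entries */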

typedef struct its_collection *(*its_cmd_builder_t)(struct its_cmd_block *,
						    struct its_cmd_desc *);

typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_cmd_block *,
					      struct its_cmd_desc *);

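/*
 * Field encoding helpers: place a value in the bitfield [h:l] of one
 * of the four 64bit words making up a command block.
 */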
static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
{
	u64 mask = GENMASK_ULL(h, l);
	*raw_cmd &= ~mask;
	*raw_cmd |= (val << l) & mask;
}

static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
{
	its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);
}

static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
{
	its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
}

static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
{
	its_mask_encode(&cmd->raw_cmd[1], id, 31, 0);
}

static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
{
	its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32);
}

static void its_encode_size(struct its_cmd_block *cmd, u8 size)
{
	its_mask_encode(&cmd->raw_cmd[1], size, 4, 0);
}

static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
{
	its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 50, 8);
}

static void its_encode_valid(struct its_cmd_block *cmd, int valid)
{
	its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63);
}

static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
{
	its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 50, 16);
}

static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
{
	its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
}

static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
{
	its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
}

static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
{
	its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
}

static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
{
	its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
}

static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
{
	its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
}

static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num)
{
	its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32);
}

static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
{
	its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0);
}

static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
{
	its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 50, 16);
}

static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
{
	its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
}

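/*
 * The ITS consumes its commands as little-endian data, so byteswap
 * each doubleword when running big-endian (cpu_to_le64 is a NOP on
 * little-endian kernels).
 */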
static inline void its_fixup_cmd(struct its_cmd_block *cmd)
{
	/* Let's fixup BE commands */
	cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]);
	cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]);
	cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]);
	cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]);
}

static struct its_collection *its_build_mapd_cmd(struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	unsigned long itt_addr;
	u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);

	itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
	itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);

	its_encode_cmd(cmd, GITS_CMD_MAPD);
	its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
	its_encode_size(cmd, size - 1);
	its_encode_itt(cmd, itt_addr);
	its_encode_valid(cmd, desc->its_mapd_cmd.valid);

	its_fixup_cmd(cmd);

	return NULL;
}

static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_MAPC);
	its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
	its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
	its_encode_valid(cmd, desc->its_mapc_cmd.valid);

	its_fixup_cmd(cmd);

	return desc->its_mapc_cmd.col;
}

static struct its_collection *its_build_mapti_cmd(struct its_cmd_block *cmd,
						  struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_mapti_cmd.dev,
			       desc->its_mapti_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_MAPTI);
	its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_mapti_cmd.event_id);
	its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id);
	its_encode_collection(cmd, col->col_id);

	its_fixup_cmd(cmd);

	return col;
}

static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_movi_cmd.dev,
			       desc->its_movi_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_MOVI);
	its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
	its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);

	its_fixup_cmd(cmd);

	return col;
}

static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd,
						    struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_discard_cmd.dev,
			       desc->its_discard_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_DISCARD);
	its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_discard_cmd.event_id);

	its_fixup_cmd(cmd);

	return col;
}

static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd,
						struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_inv_cmd.dev,
			       desc->its_inv_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INV);
	its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_inv_cmd.event_id);

	its_fixup_cmd(cmd);

	return col;
}

static struct its_collection *its_build_int_cmd(struct its_cmd_block *cmd,
						struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_int_cmd.dev,
			       desc->its_int_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INT);
	its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_int_cmd.event_id);

	its_fixup_cmd(cmd);

	return col;
}

static struct its_collection *its_build_clear_cmd(struct its_cmd_block *cmd,
						  struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_clear_cmd.dev,
			       desc->its_clear_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_CLEAR);
	its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_clear_cmd.event_id);

	its_fixup_cmd(cmd);

	return col;
}

static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd,
						   struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_INVALL);
	its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);

	its_fixup_cmd(cmd);

	return NULL;
}

static struct its_vpe *its_build_vinvall_cmd(struct its_cmd_block *cmd,
					     struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_VINVALL);
	its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);

	its_fixup_cmd(cmd);

	return desc->its_vinvall_cmd.vpe;
}

static struct its_vpe *its_build_vmapp_cmd(struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	unsigned long vpt_addr;

	vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));

	its_encode_cmd(cmd, GITS_CMD_VMAPP);
	its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
	its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
	its_encode_target(cmd, desc->its_vmapp_cmd.col->target_address);
	its_encode_vpt_addr(cmd, vpt_addr);
	its_encode_vpt_size(cmd, LPI_NRBITS - 1);

	its_fixup_cmd(cmd);

	return desc->its_vmapp_cmd.vpe;
}

static struct its_vpe *its_build_vmapti_cmd(struct its_cmd_block *cmd,
					    struct its_cmd_desc *desc)
{
	u32 db;

	if (desc->its_vmapti_cmd.db_enabled)
		db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
	else
		db = 1023;

	its_encode_cmd(cmd, GITS_CMD_VMAPTI);
	its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
	its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
	its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
	its_encode_db_phys_id(cmd, db);
	its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);

	its_fixup_cmd(cmd);

	return desc->its_vmapti_cmd.vpe;
}

static struct its_vpe *its_build_vmovi_cmd(struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	u32 db;

	if (desc->its_vmovi_cmd.db_enabled)
		db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
	else
		db = 1023;

	its_encode_cmd(cmd, GITS_CMD_VMOVI);
	its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
	its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
	its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
	its_encode_db_phys_id(cmd, db);
	its_encode_db_valid(cmd, true);

	its_fixup_cmd(cmd);

	return desc->its_vmovi_cmd.vpe;
}

static struct its_vpe *its_build_vmovp_cmd(struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_VMOVP);
	its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
	its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
	its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
	its_encode_target(cmd, desc->its_vmovp_cmd.col->target_address);

	its_fixup_cmd(cmd);

	return desc->its_vmovp_cmd.vpe;
}

static u64 its_cmd_ptr_to_offset(struct its_node *its,
				 struct its_cmd_block *ptr)
{
	return (ptr - its->cmd_base) * sizeof(*ptr);
}

static int its_queue_full(struct its_node *its)
{
	int widx;
	int ridx;

	widx = its->cmd_write - its->cmd_base;
	ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);

	/* This is incredibly unlikely to happen, unless the ITS locks up. */
	if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
		return 1;

	return 0;
}

static struct its_cmd_block *its_allocate_entry(struct its_node *its)
{
	struct its_cmd_block *cmd;
	u32 count = 1000000;	/* 1s! */

	while (its_queue_full(its)) {
		count--;
		if (!count) {
			pr_err_ratelimited("ITS queue not draining\n");
			return NULL;
		}
		cpu_relax();
		udelay(1);
	}

	cmd = its->cmd_write++;

	/* Handle queue wrapping */
	if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
		its->cmd_write = its->cmd_base;

	/* Clear command */
	cmd->raw_cmd[0] = 0;
	cmd->raw_cmd[1] = 0;
	cmd->raw_cmd[2] = 0;
	cmd->raw_cmd[3] = 0;

	return cmd;
}

static struct its_cmd_block *its_post_commands(struct its_node *its)
{
	u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);

	writel_relaxed(wr, its->base + GITS_CWRITER);

	return its->cmd_write;
}

static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
{
	/*
	 * Make sure the commands written to memory are observable by
	 * the ITS.
	 */
	if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
		gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
	else
		dsb(ishst);
}

static void its_wait_for_range_completion(struct its_node *its,
					  struct its_cmd_block *from,
					  struct its_cmd_block *to)
{
	u64 rd_idx, from_idx, to_idx;
	u32 count = 1000000;	/* 1s! */

	from_idx = its_cmd_ptr_to_offset(its, from);
	to_idx = its_cmd_ptr_to_offset(its, to);

	while (1) {
		rd_idx = readl_relaxed(its->base + GITS_CREADR);

		/* Direct case */
		if (from_idx < to_idx && rd_idx >= to_idx)
			break;

		/* Wrapped case */
		if (from_idx >= to_idx && rd_idx >= to_idx && rd_idx < from_idx)
			break;

		count--;
		if (!count) {
			pr_err_ratelimited("ITS queue timeout\n");
			return;
		}
		cpu_relax();
		udelay(1);
	}
}
/* Warning, macro hell follows */
#define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn)	\
void name(struct its_node *its,						\
	  buildtype builder,						\
	  struct its_cmd_desc *desc)					\
{									\
	struct its_cmd_block *cmd, *sync_cmd, *next_cmd;		\
	synctype *sync_obj;						\
	unsigned long flags;						\
									\
	raw_spin_lock_irqsave(&its->lock, flags);			\
									\
	cmd = its_allocate_entry(its);					\
	if (!cmd) {		/* We're soooooo screwed... */		\
		raw_spin_unlock_irqrestore(&its->lock, flags);		\
		return;							\
	}								\
	sync_obj = builder(cmd, desc);					\
	its_flush_cmd(its, cmd);					\
									\
	if (sync_obj) {							\
		sync_cmd = its_allocate_entry(its);			\
		if (!sync_cmd)						\
			goto post;					\
									\
		buildfn(sync_cmd, sync_obj);				\
		its_flush_cmd(its, sync_cmd);				\
	}								\
									\
post:									\
	next_cmd = its_post_commands(its);				\
	raw_spin_unlock_irqrestore(&its->lock, flags);			\
									\
	its_wait_for_range_completion(its, cmd, next_cmd);		\
}

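/*
 * A builder returns the object (collection or vPE) that the trailing
 * SYNC/VSYNC must target, or NULL if no synchronization is needed.
 */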
static void its_build_sync_cmd(struct its_cmd_block *sync_cmd,
			       struct its_collection *sync_col)
{
	its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
	its_encode_target(sync_cmd, sync_col->target_address);

	its_fixup_cmd(sync_cmd);
}

static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
			     struct its_collection, its_build_sync_cmd)

static void its_build_vsync_cmd(struct its_cmd_block *sync_cmd,
				struct its_vpe *sync_vpe)
{
	its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
	its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);

	its_fixup_cmd(sync_cmd);
}

static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
			     struct its_vpe, its_build_vsync_cmd)

static void its_send_int(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_int_cmd.dev = dev;
	desc.its_int_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_int_cmd, &desc);
}

static void its_send_clear(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_clear_cmd.dev = dev;
	desc.its_clear_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_clear_cmd, &desc);
}

static void its_send_inv(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_inv_cmd.dev = dev;
	desc.its_inv_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_inv_cmd, &desc);
}

static void its_send_mapd(struct its_device *dev, int valid)
{
	struct its_cmd_desc desc;

	desc.its_mapd_cmd.dev = dev;
	desc.its_mapd_cmd.valid = !!valid;

	its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
}

static void its_send_mapc(struct its_node *its, struct its_collection *col,
			  int valid)
{
	struct its_cmd_desc desc;

	desc.its_mapc_cmd.col = col;
	desc.its_mapc_cmd.valid = !!valid;

	its_send_single_command(its, its_build_mapc_cmd, &desc);
}

static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_mapti_cmd.dev = dev;
	desc.its_mapti_cmd.phys_id = irq_id;
	desc.its_mapti_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
}

static void its_send_movi(struct its_device *dev,
			  struct its_collection *col, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_movi_cmd.dev = dev;
	desc.its_movi_cmd.col = col;
	desc.its_movi_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_movi_cmd, &desc);
}

static void its_send_discard(struct its_device *dev, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_discard_cmd.dev = dev;
	desc.its_discard_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_discard_cmd, &desc);
}

static void its_send_invall(struct its_node *its, struct its_collection *col)
{
	struct its_cmd_desc desc;

	desc.its_invall_cmd.col = col;

	its_send_single_command(its, its_build_invall_cmd, &desc);
}

static void its_send_vmapti(struct its_device *dev, u32 id)
{
	struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
	struct its_cmd_desc desc;

	desc.its_vmapti_cmd.vpe = map->vpe;
	desc.its_vmapti_cmd.dev = dev;
	desc.its_vmapti_cmd.virt_id = map->vintid;
	desc.its_vmapti_cmd.event_id = id;
	desc.its_vmapti_cmd.db_enabled = map->db_enabled;

	its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
}

static void its_send_vmovi(struct its_device *dev, u32 id)
{
	struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
	struct its_cmd_desc desc;

	desc.its_vmovi_cmd.vpe = map->vpe;
	desc.its_vmovi_cmd.dev = dev;
	desc.its_vmovi_cmd.event_id = id;
	desc.its_vmovi_cmd.db_enabled = map->db_enabled;

	its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
}

static void its_send_vmapp(struct its_vpe *vpe, bool valid)
{
	struct its_cmd_desc desc;
	struct its_node *its;

	desc.its_vmapp_cmd.vpe = vpe;
	desc.its_vmapp_cmd.valid = valid;

	list_for_each_entry(its, &its_nodes, entry) {
		if (!its->is_v4)
			continue;

		desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];
		its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
	}
}

static void its_send_vmovp(struct its_vpe *vpe)
{
	struct its_cmd_desc desc;
	struct its_node *its;
	unsigned long flags;
	int col_id = vpe->col_idx;

	desc.its_vmovp_cmd.vpe = vpe;
	desc.its_vmovp_cmd.its_list = (u16)its_list_map;

	if (!its_list_map) {
		its = list_first_entry(&its_nodes, struct its_node, entry);
		desc.its_vmovp_cmd.seq_num = 0;
		desc.its_vmovp_cmd.col = &its->collections[col_id];
		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
		return;
	}

	/*
	 * Yet another marvel of the architecture. If using the
	 * its_list "feature", we need to make sure that all ITSs
	 * receive all VMOVP commands in the same order. The only way
	 * to guarantee this is to make vmovp a serialization point.
	 *
	 * Wall <-- Head.
	 */
	raw_spin_lock_irqsave(&vmovp_lock, flags);

	desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;

	/* Emit VMOVPs */
	list_for_each_entry(its, &its_nodes, entry) {
		if (!its->is_v4)
			continue;

		desc.its_vmovp_cmd.col = &its->collections[col_id];
		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
	}

	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}

static void its_send_vinvall(struct its_vpe *vpe)
{
	struct its_cmd_desc desc;
	struct its_node *its;

	desc.its_vinvall_cmd.vpe = vpe;

	list_for_each_entry(its, &its_nodes, entry) {
		if (!its->is_v4)
			continue;
		its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
	}
}

/*
 * irqchip functions - assumes MSI, mostly.
 */

static inline u32 its_get_event_id(struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	return d->hwirq - its_dev->event_map.lpi_base;
}

static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
{
	irq_hw_number_t hwirq;
	struct page *prop_page;
	u8 *cfg;

	if (irqd_is_forwarded_to_vcpu(d)) {
		struct its_device *its_dev = irq_data_get_irq_chip_data(d);
		u32 event = its_get_event_id(d);

		prop_page = its_dev->event_map.vm->vprop_page;
		hwirq = its_dev->event_map.vlpi_maps[event].vintid;
	} else {
		prop_page = gic_rdists->prop_page;
		hwirq = d->hwirq;
	}

	cfg = page_address(prop_page) + hwirq - 8192;
	*cfg &= ~clr;
	*cfg |= set | LPI_PROP_GROUP1;

	/*
	 * Make the above write visible to the redistributors.
	 * And yes, we're flushing exactly: One. Single. Byte.
	 * Humpf...
	 */
	if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
		gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
	else
		dsb(ishst);
}

static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);

	lpi_write_config(d, clr, set);
	its_send_inv(its_dev, its_get_event_id(d));
}

static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);

	if (its_dev->event_map.vlpi_maps[event].db_enabled == enable)
		return;

	its_dev->event_map.vlpi_maps[event].db_enabled = enable;

	/*
	 * More fun with the architecture:
	 *
	 * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
	 * value or to 1023, depending on the enable bit. But that
	 * would be issuing a mapping for an /existing/ DevID+EventID
	 * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
	 * to the /same/ vPE, using this opportunity to adjust the
	 * doorbell. Mouahahahaha. We loves it, Precious.
	 */
	its_send_vmovi(its_dev, event);
}

static void its_mask_irq(struct irq_data *d)
{
	if (irqd_is_forwarded_to_vcpu(d))
		its_vlpi_set_doorbell(d, false);

	lpi_update_config(d, LPI_PROP_ENABLED, 0);
}

static void its_unmask_irq(struct irq_data *d)
{
	if (irqd_is_forwarded_to_vcpu(d))
		its_vlpi_set_doorbell(d, true);

	lpi_update_config(d, 0, LPI_PROP_ENABLED);
}

static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	unsigned int cpu;
	const struct cpumask *cpu_mask = cpu_online_mask;
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_collection *target_col;
	u32 id = its_get_event_id(d);

	/* A forwarded interrupt should use irq_set_vcpu_affinity */
	if (irqd_is_forwarded_to_vcpu(d))
		return -EINVAL;

	/* An LPI cannot be routed to a redistributor on a foreign node */
	if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
		if (its_dev->its->numa_node >= 0) {
			cpu_mask = cpumask_of_node(its_dev->its->numa_node);
			if (!cpumask_intersects(mask_val, cpu_mask))
				return -EINVAL;
		}
	}

	cpu = cpumask_any_and(mask_val, cpu_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	/* Don't set the affinity when the target CPU is the same as the current one */
	if (cpu != its_dev->event_map.col_map[id]) {
		target_col = &its_dev->its->collections[cpu];
		its_send_movi(its_dev, target_col, id);
		its_dev->event_map.col_map[id] = cpu;
		irq_data_update_effective_affinity(d, cpumask_of(cpu));
	}

	return IRQ_SET_MASK_OK_DONE;
}

static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_node *its;
	u64 addr;

	its = its_dev->its;
	addr = its->phys_base + GITS_TRANSLATER;

	msg->address_lo		= lower_32_bits(addr);
	msg->address_hi		= upper_32_bits(addr);
	msg->data		= its_get_event_id(d);

	iommu_dma_map_msi_msg(d->irq, msg);
}

static int its_irq_set_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which,
				     bool state)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);

	if (which != IRQCHIP_STATE_PENDING)
		return -EINVAL;

	if (state)
		its_send_int(its_dev, event);
	else
		its_send_clear(its_dev, event);

	return 0;
}

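/*
 * The its_vlpi_*() helpers below back its_irq_set_vcpu_affinity():
 * they remap an event from a physical LPI to a guest-visible VLPI
 * (and back), keeping a private copy of the mapping information.
 */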
static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);
	int ret = 0;

	if (!info->map)
		return -EINVAL;

	mutex_lock(&its_dev->event_map.vlpi_lock);

	if (!its_dev->event_map.vm) {
		struct its_vlpi_map *maps;

		maps = kzalloc(sizeof(*maps) * its_dev->event_map.nr_lpis,
			       GFP_KERNEL);
		if (!maps) {
			ret = -ENOMEM;
			goto out;
		}

		its_dev->event_map.vm = info->map->vm;
		its_dev->event_map.vlpi_maps = maps;
	} else if (its_dev->event_map.vm != info->map->vm) {
		ret = -EINVAL;
		goto out;
	}

	/* Get our private copy of the mapping information */
	its_dev->event_map.vlpi_maps[event] = *info->map;

	if (irqd_is_forwarded_to_vcpu(d)) {
		/* Already mapped, move it around */
		its_send_vmovi(its_dev, event);
	} else {
		/* Drop the physical mapping */
		its_send_discard(its_dev, event);

		/* and install the virtual one */
		its_send_vmapti(its_dev, event);
		irqd_set_forwarded_to_vcpu(d);

		/* Increment the number of VLPIs */
		its_dev->event_map.nr_vlpis++;
	}

out:
	mutex_unlock(&its_dev->event_map.vlpi_lock);
	return ret;
}

static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);
	int ret = 0;

	mutex_lock(&its_dev->event_map.vlpi_lock);

	if (!its_dev->event_map.vm ||
	    !its_dev->event_map.vlpi_maps[event].vm) {
		ret = -EINVAL;
		goto out;
	}

	/* Copy our mapping information to the incoming request */
	*info->map = its_dev->event_map.vlpi_maps[event];

out:
	mutex_unlock(&its_dev->event_map.vlpi_lock);
	return ret;
}

static int its_vlpi_unmap(struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);
	int ret = 0;

	mutex_lock(&its_dev->event_map.vlpi_lock);

	if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
		ret = -EINVAL;
		goto out;
	}

	/* Drop the virtual mapping */
	its_send_discard(its_dev, event);

	/* and restore the physical one */
	irqd_clr_forwarded_to_vcpu(d);
	its_send_mapti(its_dev, d->hwirq, event);
	lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO |
				    LPI_PROP_ENABLED |
				    LPI_PROP_GROUP1));

	/*
	 * Drop the refcount and make the device available again if
	 * this was the last VLPI.
	 */
	if (!--its_dev->event_map.nr_vlpis) {
		its_dev->event_map.vm = NULL;
		kfree(its_dev->event_map.vlpi_maps);
	}

out:
	mutex_unlock(&its_dev->event_map.vlpi_lock);
	return ret;
}

static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);

	if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
		return -EINVAL;

	if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI)
		lpi_update_config(d, 0xff, info->config);
	else
		lpi_write_config(d, 0xff, info->config);
	its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));

	return 0;
}

static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_cmd_info *info = vcpu_info;

	/* Need a v4 ITS */
	if (!its_dev->its->is_v4)
		return -EINVAL;

	/* Unmap request? */
	if (!info)
		return its_vlpi_unmap(d);

	switch (info->cmd_type) {
	case MAP_VLPI:
		return its_vlpi_map(d, info);

	case GET_VLPI:
		return its_vlpi_get(d, info);

	case PROP_UPDATE_VLPI:
	case PROP_UPDATE_AND_INV_VLPI:
		return its_vlpi_prop_update(d, info);

	default:
		return -EINVAL;
	}
}

static struct irq_chip its_irq_chip = {
	.name			= "ITS",
	.irq_mask		= its_mask_irq,
	.irq_unmask		= its_unmask_irq,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= its_set_affinity,
	.irq_compose_msi_msg	= its_irq_compose_msi_msg,
	.irq_set_irqchip_state	= its_irq_set_irqchip_state,
	.irq_set_vcpu_affinity	= its_irq_set_vcpu_affinity,
};

/*
 * How we allocate LPIs:
 *
 * The GIC has id_bits bits for interrupt identifiers. From there, we
 * must subtract 8192 which are reserved for SGIs/PPIs/SPIs. Then, as
 * we allocate LPIs by chunks of 32, we can shift the whole thing by 5
 * bits to the right.
 *
 * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations.
 */
#define IRQS_PER_CHUNK_SHIFT	5
#define IRQS_PER_CHUNK		(1 << IRQS_PER_CHUNK_SHIFT)
#define ITS_MAX_LPI_NRBITS	16 /* 64K LPIs */
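/* Example: with id_bits = 16, that is (65536 - 8192) >> 5 = 1792 chunks */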

static unsigned long *lpi_bitmap;
static u32 lpi_chunks;
static DEFINE_SPINLOCK(lpi_lock);

static int its_lpi_to_chunk(int lpi)
{
	return (lpi - 8192) >> IRQS_PER_CHUNK_SHIFT;
}

static int its_chunk_to_lpi(int chunk)
{
	return (chunk << IRQS_PER_CHUNK_SHIFT) + 8192;
}

static int __init its_lpi_init(u32 id_bits)
{
	lpi_chunks = its_lpi_to_chunk(1UL << id_bits);

	lpi_bitmap = kzalloc(BITS_TO_LONGS(lpi_chunks) * sizeof(long),
			     GFP_KERNEL);
	if (!lpi_bitmap) {
		lpi_chunks = 0;
		return -ENOMEM;
	}

	pr_info("ITS: Allocated %d chunks for LPIs\n", (int)lpi_chunks);
	return 0;
}

static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids)
{
	unsigned long *bitmap = NULL;
	int chunk_id;
	int nr_chunks;
	int i;

	nr_chunks = DIV_ROUND_UP(nr_irqs, IRQS_PER_CHUNK);

	spin_lock(&lpi_lock);

	do {
		chunk_id = bitmap_find_next_zero_area(lpi_bitmap, lpi_chunks,
						      0, nr_chunks, 0);
		if (chunk_id < lpi_chunks)
			break;

		nr_chunks--;
	} while (nr_chunks > 0);

	if (!nr_chunks)
		goto out;

	bitmap = kzalloc(BITS_TO_LONGS(nr_chunks * IRQS_PER_CHUNK) * sizeof(long),
			 GFP_ATOMIC);
	if (!bitmap)
		goto out;

	for (i = 0; i < nr_chunks; i++)
		set_bit(chunk_id + i, lpi_bitmap);

	*base = its_chunk_to_lpi(chunk_id);
	*nr_ids = nr_chunks * IRQS_PER_CHUNK;

out:
	spin_unlock(&lpi_lock);

	if (!bitmap)
		*base = *nr_ids = 0;

	return bitmap;
}

static void its_lpi_free_chunks(unsigned long *bitmap, int base, int nr_ids)
{
	int lpi;

	spin_lock(&lpi_lock);

	for (lpi = base; lpi < (base + nr_ids); lpi += IRQS_PER_CHUNK) {
		int chunk = its_lpi_to_chunk(lpi);

		BUG_ON(chunk > lpi_chunks);
		if (test_bit(chunk, lpi_bitmap)) {
			clear_bit(chunk, lpi_bitmap);
		} else {
			pr_err("Bad LPI chunk %d\n", chunk);
		}
	}

	spin_unlock(&lpi_lock);

	kfree(bitmap);
}

static struct page *its_allocate_prop_table(gfp_t gfp_flags)
{
	struct page *prop_page;

	prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
	if (!prop_page)
		return NULL;

	/* Priority 0xa0, Group-1, disabled */
	memset(page_address(prop_page),
	       LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1,
	       LPI_PROPBASE_SZ);

	/* Make sure the GIC will observe the written configuration */
	gic_flush_dcache_to_poc(page_address(prop_page), LPI_PROPBASE_SZ);

	return prop_page;
}

static void its_free_prop_table(struct page *prop_page)
{
	free_pages((unsigned long)page_address(prop_page),
		   get_order(LPI_PROPBASE_SZ));
}

static int __init its_alloc_lpi_tables(void)
{
	phys_addr_t paddr;

	lpi_id_bits = min_t(u32, gic_rdists->id_bits, ITS_MAX_LPI_NRBITS);
	gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT);
	if (!gic_rdists->prop_page) {
		pr_err("Failed to allocate PROPBASE\n");
		return -ENOMEM;
	}

	paddr = page_to_phys(gic_rdists->prop_page);
	pr_info("GIC: using LPI property table @%pa\n", &paddr);

	return its_lpi_init(lpi_id_bits);
}

static const char *its_base_type_string[] = {
	[GITS_BASER_TYPE_DEVICE]	= "Devices",
	[GITS_BASER_TYPE_VCPU]		= "Virtual CPUs",
	[GITS_BASER_TYPE_RESERVED3]	= "Reserved (3)",
	[GITS_BASER_TYPE_COLLECTION]	= "Interrupt Collections",
	[GITS_BASER_TYPE_RESERVED5]	= "Reserved (5)",
	[GITS_BASER_TYPE_RESERVED6]	= "Reserved (6)",
	[GITS_BASER_TYPE_RESERVED7]	= "Reserved (7)",
};

static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
{
	u32 idx = baser - its->tables;

	return gits_read_baser(its->base + GITS_BASER + (idx << 3));
}

static void its_write_baser(struct its_node *its, struct its_baser *baser,
			    u64 val)
{
	u32 idx = baser - its->tables;

	gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
	baser->val = its_read_baser(its, baser);
}

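/*
 * Program a single GITS_BASERn register and allocate its backing
 * table, retrying with whatever shareability/page size the hardware
 * reports back if our first choice doesn't stick.
 */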
static int its_setup_baser(struct its_node *its, struct its_baser *baser,
			   u64 cache, u64 shr, u32 psz, u32 order,
			   bool indirect)
{
	u64 val = its_read_baser(its, baser);
	u64 esz = GITS_BASER_ENTRY_SIZE(val);
	u64 type = GITS_BASER_TYPE(val);
	u32 alloc_pages;
	void *base;
	u64 tmp;

retry_alloc_baser:
	alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
	if (alloc_pages > GITS_BASER_PAGES_MAX) {
		pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
			&its->phys_base, its_base_type_string[type],
			alloc_pages, GITS_BASER_PAGES_MAX);
		alloc_pages = GITS_BASER_PAGES_MAX;
		order = get_order(GITS_BASER_PAGES_MAX * psz);
	}

	base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!base)
		return -ENOMEM;

retry_baser:
	val = (virt_to_phys(base)				 |
		(type << GITS_BASER_TYPE_SHIFT)			 |
		((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT)	 |
		((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT)	 |
		cache						 |
		shr						 |
		GITS_BASER_VALID);

	val |= indirect ? GITS_BASER_INDIRECT : 0x0;

	switch (psz) {
	case SZ_4K:
		val |= GITS_BASER_PAGE_SIZE_4K;
		break;
	case SZ_16K:
		val |= GITS_BASER_PAGE_SIZE_16K;
		break;
	case SZ_64K:
		val |= GITS_BASER_PAGE_SIZE_64K;
		break;
	}

	its_write_baser(its, baser, val);
	tmp = baser->val;

	if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
		/*
		 * Shareability didn't stick. Just use
		 * whatever the read reported, which is likely
		 * to be the only thing this redistributor
		 * supports. If that's zero, make it
		 * non-cacheable as well.
		 */
		shr = tmp & GITS_BASER_SHAREABILITY_MASK;
		if (!shr) {
			cache = GITS_BASER_nC;
			gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
		}
		goto retry_baser;
	}

	if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
		/*
		 * Page size didn't stick. Let's try a smaller
		 * size and retry. If we reach 4K, then
		 * something is horribly wrong...
		 */
		free_pages((unsigned long)base, order);
		baser->base = NULL;

		switch (psz) {
		case SZ_16K:
			psz = SZ_4K;
			goto retry_alloc_baser;
		case SZ_64K:
			psz = SZ_16K;
			goto retry_alloc_baser;
		}
	}

	if (val != tmp) {
		pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
		       &its->phys_base, its_base_type_string[type],
		       val, tmp);
		free_pages((unsigned long)base, order);
		return -ENXIO;
	}

	baser->order = order;
	baser->base = base;
	baser->psz = psz;
	tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;

	pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
		&its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
		its_base_type_string[type],
		(unsigned long)virt_to_phys(base),
		indirect ? "indirect" : "flat", (int)esz,
		psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);

	return 0;
}
1586
Marc Zyngier4cacac52016-12-19 18:18:34 +00001587static bool its_parse_indirect_baser(struct its_node *its,
1588 struct its_baser *baser,
Shanker Donthineni32bd44d2017-10-07 15:43:48 -05001589 u32 psz, u32 *order, u32 ids)
Shanker Donthineni4b75c452016-06-06 18:17:29 -05001590{
Marc Zyngier4cacac52016-12-19 18:18:34 +00001591 u64 tmp = its_read_baser(its, baser);
1592 u64 type = GITS_BASER_TYPE(tmp);
1593 u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
Shanker Donthineni2fd632a2017-01-25 21:51:41 -06001594 u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
Shanker Donthineni4b75c452016-06-06 18:17:29 -05001595 u32 new_order = *order;
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001596 bool indirect = false;
1597
1598 /* No need to enable Indirection if memory requirement < (psz*2)bytes */
1599 if ((esz << ids) > (psz * 2)) {
1600 /*
1601 * Find out whether hw supports a single or two-level table
1602 * by reading bit at offset '62' after writing '1' to it.
1603 */
1604 its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
1605 indirect = !!(baser->val & GITS_BASER_INDIRECT);
1606
1607 if (indirect) {
1608 /*
1609 * The size of each lvl2 table is equal to the ITS page size,
1610 * 'psz'. To compute the lvl1 table size, subtract from 'ids'
1611 * (as reported by the ITS hardware) the ID bits covered by a
1612 * single lvl2 table; the lvl1 table then needs one entry of
1613 * GITS_LVL1_ENTRY_SIZE bytes per remaining ID.
1614 */
Vladimir Murzind524eaa2016-11-02 11:54:04 +00001615 ids -= ilog2(psz / (int)esz);
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001616 esz = GITS_LVL1_ENTRY_SIZE;
1617 }
1618 }
Shanker Donthineni4b75c452016-06-06 18:17:29 -05001619
1620 /*
1621 * Allocate as many entries as required to fit the
1622 * range of device IDs that the ITS can grok... The ID
1623 * space being incredibly sparse, this results in a
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001624 * massive waste of memory if the two-level table
1625 * feature is not supported by the hardware.
Shanker Donthineni4b75c452016-06-06 18:17:29 -05001626 */
1627 new_order = max_t(u32, get_order(esz << ids), new_order);
1628 if (new_order >= MAX_ORDER) {
1629 new_order = MAX_ORDER - 1;
Vladimir Murzind524eaa2016-11-02 11:54:04 +00001630 ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
Marc Zyngier4cacac52016-12-19 18:18:34 +00001631 pr_warn("ITS@%pa: %s Table too large, reduce ids %u->%u\n",
1632 &its->phys_base, its_base_type_string[type],
1633 its->device_ids, ids);
Shanker Donthineni4b75c452016-06-06 18:17:29 -05001634 }
1635
1636 *order = new_order;
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001637
1638 return indirect;
Shanker Donthineni4b75c452016-06-06 18:17:29 -05001639}
1640
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001641static void its_free_tables(struct its_node *its)
1642{
1643 int i;
1644
1645 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
Shanker Donthineni1a485f42016-02-01 20:19:44 -06001646 if (its->tables[i].base) {
1647 free_pages((unsigned long)its->tables[i].base,
1648 its->tables[i].order);
1649 its->tables[i].base = NULL;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001650 }
1651 }
1652}
1653
Shanker Donthineni0e0b0f62016-06-06 18:17:31 -05001654static int its_alloc_tables(struct its_node *its)
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001655{
Marc Zyngier589ce5f2016-10-14 15:13:07 +01001656 u64 typer = gic_read_typer(its->base + GITS_TYPER);
Shanker Donthineni93473592016-06-06 18:17:30 -05001657 u32 ids = GITS_TYPER_DEVBITS(typer);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001658 u64 shr = GITS_BASER_InnerShareable;
Shanker Donthineni2fd632a2017-01-25 21:51:41 -06001659 u64 cache = GITS_BASER_RaWaWb;
Shanker Donthineni93473592016-06-06 18:17:30 -05001660 u32 psz = SZ_64K;
1661 int err, i;
Robert Richter94100972015-09-21 22:58:38 +02001662
1663 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) {
1664 /*
Shanker Donthineni93473592016-06-06 18:17:30 -05001665 * erratum 22375: only alloc 8MB table size
1666 * erratum 24313: ignore memory access type
1667 */
1668 cache = GITS_BASER_nCnB;
1669 ids = 0x14; /* 20 bits, 8MB */
Robert Richter94100972015-09-21 22:58:38 +02001670 }
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001671
Shanker Donthineni466b7d12016-03-09 22:10:49 -06001672 its->device_ids = ids;
1673
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001674 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
Shanker Donthineni2d81d422016-06-06 18:17:28 -05001675 struct its_baser *baser = its->tables + i;
1676 u64 val = its_read_baser(its, baser);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001677 u64 type = GITS_BASER_TYPE(val);
Shanker Donthineni93473592016-06-06 18:17:30 -05001678 u32 order = get_order(psz);
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001679 bool indirect = false;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001680
Marc Zyngier4cacac52016-12-19 18:18:34 +00001681 switch (type) {
1682 case GITS_BASER_TYPE_NONE:
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001683 continue;
1684
Marc Zyngier4cacac52016-12-19 18:18:34 +00001685 case GITS_BASER_TYPE_DEVICE:
Shanker Donthineni32bd44d2017-10-07 15:43:48 -05001686 indirect = its_parse_indirect_baser(its, baser,
1687 psz, &order,
1688 its->device_ids);
break;

Marc Zyngier4cacac52016-12-19 18:18:34 +00001689 case GITS_BASER_TYPE_VCPU:
1690 indirect = its_parse_indirect_baser(its, baser,
Shanker Donthineni32bd44d2017-10-07 15:43:48 -05001691 psz, &order,
1692 ITS_MAX_VPEID_BITS);
Marc Zyngier4cacac52016-12-19 18:18:34 +00001693 break;
1694 }
Marc Zyngierf54b97e2015-03-06 16:37:41 +00001695
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001696 err = its_setup_baser(its, baser, cache, shr, psz, order, indirect);
Shanker Donthineni93473592016-06-06 18:17:30 -05001697 if (err < 0) {
1698 its_free_tables(its);
1699 return err;
Robert Richter30f21362015-09-21 22:58:34 +02001700 }
1701
Shanker Donthineni93473592016-06-06 18:17:30 -05001702 /* Update settings which will be used for next BASERn */
1703 psz = baser->psz;
1704 cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
1705 shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001706 }
1707
1708 return 0;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001709}
1710
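/* One collection slot per possible CPU; its_cpu_init_collection() later binds each slot to a redistributor */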
1711static int its_alloc_collections(struct its_node *its)
1712{
1713 its->collections = kzalloc(nr_cpu_ids * sizeof(*its->collections),
1714 GFP_KERNEL);
1715 if (!its->collections)
1716 return -ENOMEM;
1717
1718 return 0;
1719}
1720
Marc Zyngier7c297a22016-12-19 18:34:38 +00001721static struct page *its_allocate_pending_table(gfp_t gfp_flags)
1722{
1723 struct page *pend_page;
1724 /*
1725 * The pending pages have to be at least 64kB aligned,
1726 * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below.
1727 */
1728 pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
1729 get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
1730 if (!pend_page)
1731 return NULL;
1732
1733 /* Make sure the GIC will observe the zero-ed page */
1734 gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);
1735
1736 return pend_page;
1737}
1738
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00001739static void its_free_pending_table(struct page *pt)
1740{
1741 free_pages((unsigned long)page_address(pt),
1742 get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
1743}
1744
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001745static void its_cpu_init_lpis(void)
1746{
1747 void __iomem *rbase = gic_data_rdist_rd_base();
1748 struct page *pend_page;
1749 u64 val, tmp;
1750
1751 /* If we didn't allocate the pending table yet, do it now */
1752 pend_page = gic_data_rdist()->pend_page;
1753 if (!pend_page) {
1754 phys_addr_t paddr;
Marc Zyngier7c297a22016-12-19 18:34:38 +00001755
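/* GFP_NOWAIT: this path likely runs during early CPU bringup, where sleeping is not an option */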
1756 pend_page = its_allocate_pending_table(GFP_NOWAIT);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001757 if (!pend_page) {
1758 pr_err("Failed to allocate PENDBASE for CPU%d\n",
1759 smp_processor_id());
1760 return;
1761 }
1762
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001763 paddr = page_to_phys(pend_page);
1764 pr_info("CPU%d: using LPI pending table @%pa\n",
1765 smp_processor_id(), &paddr);
1766 gic_data_rdist()->pend_page = pend_page;
1767 }
1768
1769 /* Disable LPIs */
1770 val = readl_relaxed(rbase + GICR_CTLR);
1771 val &= ~GICR_CTLR_ENABLE_LPIS;
1772 writel_relaxed(val, rbase + GICR_CTLR);
1773
1774 /*
1775 * Make sure any change to the table is observable by the GIC.
1776 */
1777 dsb(sy);
1778
1779 /* set PROPBASE */
1780 val = (page_to_phys(gic_rdists->prop_page) |
1781 GICR_PROPBASER_InnerShareable |
Shanker Donthineni2fd632a2017-01-25 21:51:41 -06001782 GICR_PROPBASER_RaWaWb |
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001783 ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
1784
Vladimir Murzin0968a612016-11-02 11:54:06 +00001785 gicr_write_propbaser(val, rbase + GICR_PROPBASER);
1786 tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001787
1788 if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
Marc Zyngier241a3862015-03-27 14:15:05 +00001789 if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
1790 /*
1791 * The HW reports non-shareable, so we must
1792 * remove the cacheability attributes as
1793 * well.
1794 */
1795 val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
1796 GICR_PROPBASER_CACHEABILITY_MASK);
1797 val |= GICR_PROPBASER_nC;
Vladimir Murzin0968a612016-11-02 11:54:06 +00001798 gicr_write_propbaser(val, rbase + GICR_PROPBASER);
Marc Zyngier241a3862015-03-27 14:15:05 +00001799 }
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001800 pr_info_once("GIC: using cache flushing for LPI property table\n");
1801 gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
1802 }
1803
1804 /* set PENDBASE */
1805 val = (page_to_phys(pend_page) |
Marc Zyngier4ad3e362015-03-27 14:15:04 +00001806 GICR_PENDBASER_InnerShareable |
Shanker Donthineni2fd632a2017-01-25 21:51:41 -06001807 GICR_PENDBASER_RaWaWb);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001808
Vladimir Murzin0968a612016-11-02 11:54:06 +00001809 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
1810 tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
Marc Zyngier241a3862015-03-27 14:15:05 +00001811
1812 if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
1813 /*
1814 * The HW reports non-shareable, so we must remove the
1815 * cacheability attributes as well.
1816 */
1817 val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
1818 GICR_PENDBASER_CACHEABILITY_MASK);
1819 val |= GICR_PENDBASER_nC;
Vladimir Murzin0968a612016-11-02 11:54:06 +00001820 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
Marc Zyngier241a3862015-03-27 14:15:05 +00001821 }
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001822
1823 /* Enable LPIs */
1824 val = readl_relaxed(rbase + GICR_CTLR);
1825 val |= GICR_CTLR_ENABLE_LPIS;
1826 writel_relaxed(val, rbase + GICR_CTLR);
1827
1828 /* Make sure the GIC has seen the above */
1829 dsb(sy);
1830}
1831
1832static void its_cpu_init_collection(void)
1833{
1834 struct its_node *its;
1835 int cpu;
1836
1837 spin_lock(&its_lock);
1838 cpu = smp_processor_id();
1839
1840 list_for_each_entry(its, &its_nodes, entry) {
1841 u64 target;
1842
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +02001843 /* avoid cross-node collections and their mappings */
1844 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
1845 struct device_node *cpu_node;
1846
1847 cpu_node = of_get_cpu_node(cpu, NULL);
1848 if (its->numa_node != NUMA_NO_NODE &&
1849 its->numa_node != of_node_to_nid(cpu_node))
1850 continue;
1851 }
1852
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001853 /*
1854 * We now have to bind each collection to its target
1855 * redistributor.
1856 */
Marc Zyngier589ce5f2016-10-14 15:13:07 +01001857 if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001858 /*
1859 * This ITS wants the physical address of the
1860 * redistributor.
1861 */
1862 target = gic_data_rdist()->phys_base;
1863 } else {
1864 /*
1865 * This ITS wants a linear CPU number.
1866 */
Marc Zyngier589ce5f2016-10-14 15:13:07 +01001867 target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
Marc Zyngier263fcd32015-03-27 14:15:02 +00001868 target = GICR_TYPER_CPU_NUMBER(target) << 16;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001869 }
1870
1871 /* Perform collection mapping */
1872 its->collections[cpu].target_address = target;
1873 its->collections[cpu].col_id = cpu;
1874
1875 its_send_mapc(its, &its->collections[cpu], 1);
1876 its_send_invall(its, &its->collections[cpu]);
1877 }
1878
1879 spin_unlock(&its_lock);
1880}
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00001881
1882static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
1883{
1884 struct its_device *its_dev = NULL, *tmp;
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00001885 unsigned long flags;
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00001886
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00001887 raw_spin_lock_irqsave(&its->lock, flags);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00001888
1889 list_for_each_entry(tmp, &its->its_device_list, entry) {
1890 if (tmp->device_id == dev_id) {
1891 its_dev = tmp;
1892 break;
1893 }
1894 }
1895
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00001896 raw_spin_unlock_irqrestore(&its->lock, flags);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00001897
1898 return its_dev;
1899}
1900
Shanker Donthineni466b7d12016-03-09 22:10:49 -06001901static struct its_baser *its_get_baser(struct its_node *its, u32 type)
1902{
1903 int i;
1904
1905 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
1906 if (GITS_BASER_TYPE(its->tables[i].val) == type)
1907 return &its->tables[i];
1908 }
1909
1910 return NULL;
1911}
1912
Marc Zyngier70cc81e2016-12-19 18:53:02 +00001913static bool its_alloc_table_entry(struct its_baser *baser, u32 id)
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001914{
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001915 struct page *page;
1916 u32 esz, idx;
1917 __le64 *table;
1918
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001919 /* Don't allow a device ID that exceeds the single, flat table limit */
1920 esz = GITS_BASER_ENTRY_SIZE(baser->val);
1921 if (!(baser->val & GITS_BASER_INDIRECT))
Marc Zyngier70cc81e2016-12-19 18:53:02 +00001922 return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001923
1924 /* Compute 1st level table index & check if that exceeds table limit */
Marc Zyngier70cc81e2016-12-19 18:53:02 +00001925 idx = id >> ilog2(baser->psz / esz);
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001926 if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
1927 return false;
1928
1929 table = baser->base;
1930
1931 /* Allocate memory for 2nd level table */
1932 if (!table[idx]) {
1933 page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(baser->psz));
1934 if (!page)
1935 return false;
1936
1937 /* Flush Lvl2 table to PoC if hw doesn't support coherency */
1938 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
Vladimir Murzin328191c2016-11-02 11:54:05 +00001939 gic_flush_dcache_to_poc(page_address(page), baser->psz);
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001940
1941 table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
1942
1943 /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
1944 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
Vladimir Murzin328191c2016-11-02 11:54:05 +00001945 gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001946
1947 /* Ensure updated table contents are visible to ITS hardware */
1948 dsb(sy);
1949 }
1950
1951 return true;
1952}
1953
Marc Zyngier70cc81e2016-12-19 18:53:02 +00001954static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
1955{
1956 struct its_baser *baser;
1957
1958 baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
1959
1960 /* Don't allow a device ID that exceeds the ITS hardware limit */
1961 if (!baser)
1962 return (ilog2(dev_id) < its->device_ids);
1963
1964 return its_alloc_table_entry(baser, dev_id);
1965}
1966
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00001967static bool its_alloc_vpe_table(u32 vpe_id)
1968{
1969 struct its_node *its;
1970
1971 /*
1972 * Make sure the L2 tables are allocated on *all* v4 ITSs. We
1973 * could try and only do it on ITSs corresponding to devices
1974 * that have interrupts targeted at this VPE, but the
1975 * complexity becomes crazy (and you have tons of memory
1976 * anyway, right?).
1977 */
1978 list_for_each_entry(its, &its_nodes, entry) {
1979 struct its_baser *baser;
1980
1981 if (!its->is_v4)
1982 continue;
1983
1984 baser = its_get_baser(its, GITS_BASER_TYPE_VCPU);
1985 if (!baser)
1986 return false;
1987
1988 if (!its_alloc_table_entry(baser, vpe_id))
1989 return false;
1990 }
1991
1992 return true;
1993}
1994
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00001995static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
Marc Zyngier93f94ea2017-08-04 18:37:09 +01001996 int nvecs, bool alloc_lpis)
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00001997{
1998 struct its_device *dev;
Marc Zyngier93f94ea2017-08-04 18:37:09 +01001999 unsigned long *lpi_map = NULL;
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002000 unsigned long flags;
Marc Zyngier591e5be2015-07-17 10:46:42 +01002001 u16 *col_map = NULL;
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002002 void *itt;
2003 int lpi_base;
2004 int nr_lpis;
Marc Zyngierc8481262014-12-12 10:51:24 +00002005 int nr_ites;
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002006 int sz;
2007
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002008 if (!its_alloc_device_table(its, dev_id))
Shanker Donthineni466b7d12016-03-09 22:10:49 -06002009 return NULL;
2010
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002011 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
Marc Zyngierc8481262014-12-12 10:51:24 +00002012 /*
2013 * At least one bit of EventID is being used, hence a minimum
2014 * of two entries. No, the architecture doesn't let you
2015 * express an ITT with a single entry.
2016 */
Will Deacon96555c42014-12-17 14:11:09 +00002017 nr_ites = max(2UL, roundup_pow_of_two(nvecs));
Marc Zyngierc8481262014-12-12 10:51:24 +00002018 sz = nr_ites * its->ite_size;
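/* Over-allocate so the ITT can be aligned to ITS_ITT_ALIGN (presumably 256 bytes) */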
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002019 sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
Yun Wu6c834122015-03-06 16:37:46 +00002020 itt = kzalloc(sz, GFP_KERNEL);
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002021 if (alloc_lpis) {
2022 lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
2023 if (lpi_map)
2024 col_map = kzalloc(sizeof(*col_map) * nr_lpis,
2025 GFP_KERNEL);
2026 } else {
2027 col_map = kzalloc(sizeof(*col_map) * nr_ites, GFP_KERNEL);
2028 nr_lpis = 0;
2029 lpi_base = 0;
2030 }
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002031
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002032 if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002033 kfree(dev);
2034 kfree(itt);
2035 kfree(lpi_map);
Marc Zyngier591e5be2015-07-17 10:46:42 +01002036 kfree(col_map);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002037 return NULL;
2038 }
2039
Vladimir Murzin328191c2016-11-02 11:54:05 +00002040 gic_flush_dcache_to_poc(itt, sz);
Marc Zyngier5a9a8912015-09-13 12:14:32 +01002041
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002042 dev->its = its;
2043 dev->itt = itt;
Marc Zyngierc8481262014-12-12 10:51:24 +00002044 dev->nr_ites = nr_ites;
Marc Zyngier591e5be2015-07-17 10:46:42 +01002045 dev->event_map.lpi_map = lpi_map;
2046 dev->event_map.col_map = col_map;
2047 dev->event_map.lpi_base = lpi_base;
2048 dev->event_map.nr_lpis = nr_lpis;
Marc Zyngierd011e4e2016-12-20 09:44:41 +00002049 mutex_init(&dev->event_map.vlpi_lock);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002050 dev->device_id = dev_id;
2051 INIT_LIST_HEAD(&dev->entry);
2052
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002053 raw_spin_lock_irqsave(&its->lock, flags);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002054 list_add(&dev->entry, &its->its_device_list);
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002055 raw_spin_unlock_irqrestore(&its->lock, flags);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002056
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002057 /* Map device to its ITT */
2058 its_send_mapd(dev, 1);
2059
2060 return dev;
2061}
2062
2063static void its_free_device(struct its_device *its_dev)
2064{
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002065 unsigned long flags;
2066
2067 raw_spin_lock_irqsave(&its_dev->its->lock, flags);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002068 list_del(&its_dev->entry);
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002069 raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002070 kfree(its_dev->itt);
2071 kfree(its_dev);
2072}
Marc Zyngierb48ac832014-11-24 14:35:16 +00002073
2074static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
2075{
2076 int idx;
2077
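/* Look for a free event slot in the per-device LPI map */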
Marc Zyngier591e5be2015-07-17 10:46:42 +01002078 idx = find_first_zero_bit(dev->event_map.lpi_map,
2079 dev->event_map.nr_lpis);
2080 if (idx == dev->event_map.nr_lpis)
Marc Zyngierb48ac832014-11-24 14:35:16 +00002081 return -ENOSPC;
2082
Marc Zyngier591e5be2015-07-17 10:46:42 +01002083 *hwirq = dev->event_map.lpi_base + idx;
2084 set_bit(idx, dev->event_map.lpi_map);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002085
Marc Zyngierb48ac832014-11-24 14:35:16 +00002086 return 0;
2087}
2088
Marc Zyngier54456db2015-07-28 14:46:21 +01002089static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
2090 int nvec, msi_alloc_info_t *info)
Marc Zyngiere8137f42015-03-06 16:37:42 +00002091{
Marc Zyngierb48ac832014-11-24 14:35:16 +00002092 struct its_node *its;
Marc Zyngierb48ac832014-11-24 14:35:16 +00002093 struct its_device *its_dev;
Marc Zyngier54456db2015-07-28 14:46:21 +01002094 struct msi_domain_info *msi_info;
2095 u32 dev_id;
Marc Zyngierb48ac832014-11-24 14:35:16 +00002096
Marc Zyngier54456db2015-07-28 14:46:21 +01002097 /*
2098 * We ignore "dev" entirely, and rely on the dev_id that has
2099 * been passed via the scratchpad. This limits this domain's
2100 * usefulness to upper layers that definitely know that they
2101 * are built on top of the ITS.
2102 */
2103 dev_id = info->scratchpad[0].ul;
2104
2105 msi_info = msi_get_domain_info(domain);
2106 its = msi_info->data;
2107
Marc Zyngier20b3d542016-12-20 15:23:22 +00002108 if (!gic_rdists->has_direct_lpi &&
2109 vpe_proxy.dev &&
2110 vpe_proxy.dev->its == its &&
2111 dev_id == vpe_proxy.dev->device_id) {
2112 /* Bad luck. Get yourself a better implementation */
2113 WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n",
2114 dev_id);
2115 return -EINVAL;
2116 }
2117
Marc Zyngierf1304202015-07-28 14:46:18 +01002118 its_dev = its_find_device(its, dev_id);
Marc Zyngiere8137f42015-03-06 16:37:42 +00002119 if (its_dev) {
2120 /*
2121 * We already have seen this ID, probably through
2122 * another alias (PCI bridge of some sort). No need to
2123 * create the device.
2124 */
Marc Zyngierf1304202015-07-28 14:46:18 +01002125 pr_debug("Reusing ITT for devID %x\n", dev_id);
Marc Zyngiere8137f42015-03-06 16:37:42 +00002126 goto out;
2127 }
Marc Zyngierb48ac832014-11-24 14:35:16 +00002128
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002129 its_dev = its_create_device(its, dev_id, nvec, true);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002130 if (!its_dev)
2131 return -ENOMEM;
2132
Marc Zyngierf1304202015-07-28 14:46:18 +01002133 pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
Marc Zyngiere8137f42015-03-06 16:37:42 +00002134out:
Marc Zyngierb48ac832014-11-24 14:35:16 +00002135 info->scratchpad[0].ptr = its_dev;
Marc Zyngierb48ac832014-11-24 14:35:16 +00002136 return 0;
2137}
2138
Marc Zyngier54456db2015-07-28 14:46:21 +01002139static struct msi_domain_ops its_msi_domain_ops = {
2140 .msi_prepare = its_msi_prepare,
2141};
2142
Marc Zyngierb48ac832014-11-24 14:35:16 +00002143static int its_irq_gic_domain_alloc(struct irq_domain *domain,
2144 unsigned int virq,
2145 irq_hw_number_t hwirq)
2146{
Marc Zyngierf833f572015-10-13 12:51:33 +01002147 struct irq_fwspec fwspec;
Marc Zyngierb48ac832014-11-24 14:35:16 +00002148
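/* Build a parent fwspec: DT parents take the 3-cell GIC binding, bare-fwnode (ACPI) parents a 2-cell form */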
Marc Zyngierf833f572015-10-13 12:51:33 +01002149 if (irq_domain_get_of_node(domain->parent)) {
2150 fwspec.fwnode = domain->parent->fwnode;
2151 fwspec.param_count = 3;
2152 fwspec.param[0] = GIC_IRQ_TYPE_LPI;
2153 fwspec.param[1] = hwirq;
2154 fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
Tomasz Nowicki3f010cf2016-09-12 20:32:25 +02002155 } else if (is_fwnode_irqchip(domain->parent->fwnode)) {
2156 fwspec.fwnode = domain->parent->fwnode;
2157 fwspec.param_count = 2;
2158 fwspec.param[0] = hwirq;
2159 fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
Marc Zyngierf833f572015-10-13 12:51:33 +01002160 } else {
2161 return -EINVAL;
2162 }
Marc Zyngierb48ac832014-11-24 14:35:16 +00002163
Marc Zyngierf833f572015-10-13 12:51:33 +01002164 return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002165}
2166
2167static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
2168 unsigned int nr_irqs, void *args)
2169{
2170 msi_alloc_info_t *info = args;
2171 struct its_device *its_dev = info->scratchpad[0].ptr;
2172 irq_hw_number_t hwirq;
2173 int err;
2174 int i;
2175
2176 for (i = 0; i < nr_irqs; i++) {
2177 err = its_alloc_device_irq(its_dev, &hwirq);
2178 if (err)
2179 return err;
2180
2181 err = its_irq_gic_domain_alloc(domain, virq + i, hwirq);
2182 if (err)
2183 return err;
2184
2185 irq_domain_set_hwirq_and_chip(domain, virq + i,
2186 hwirq, &its_irq_chip, its_dev);
Marc Zyngier0d224d32017-08-18 09:39:18 +01002187 irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i)));
Marc Zyngierf1304202015-07-28 14:46:18 +01002188 pr_debug("ID:%d pID:%d vID:%d\n",
2189 (int)(hwirq - its_dev->event_map.lpi_base),
2190 (int) hwirq, virq + i);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002191 }
2192
2193 return 0;
2194}
2195
Marc Zyngieraca268d2014-12-12 10:51:23 +00002196static void its_irq_domain_activate(struct irq_domain *domain,
2197 struct irq_data *d)
2198{
2199 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2200 u32 event = its_get_event_id(d);
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +02002201 const struct cpumask *cpu_mask = cpu_online_mask;
Marc Zyngier0d224d32017-08-18 09:39:18 +01002202 int cpu;
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +02002203
2204 /* get the cpu_mask of the local node */
2205 if (its_dev->its->numa_node >= 0)
2206 cpu_mask = cpumask_of_node(its_dev->its->numa_node);
Marc Zyngieraca268d2014-12-12 10:51:23 +00002207
Marc Zyngier591e5be2015-07-17 10:46:42 +01002208 /* Bind the LPI to the first possible CPU */
Marc Zyngier0d224d32017-08-18 09:39:18 +01002209 cpu = cpumask_first(cpu_mask);
2210 its_dev->event_map.col_map[event] = cpu;
2211 irq_data_update_effective_affinity(d, cpumask_of(cpu));
Marc Zyngier591e5be2015-07-17 10:46:42 +01002212
Marc Zyngieraca268d2014-12-12 10:51:23 +00002213 /* Map the GIC IRQ and event to the device */
Marc Zyngier6a25ad32016-12-20 15:52:26 +00002214 its_send_mapti(its_dev, d->hwirq, event);
Marc Zyngieraca268d2014-12-12 10:51:23 +00002215}
2216
2217static void its_irq_domain_deactivate(struct irq_domain *domain,
2218 struct irq_data *d)
2219{
2220 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2221 u32 event = its_get_event_id(d);
2222
2223 /* Stop the delivery of interrupts */
2224 its_send_discard(its_dev, event);
2225}
2226
Marc Zyngierb48ac832014-11-24 14:35:16 +00002227static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
2228 unsigned int nr_irqs)
2229{
2230 struct irq_data *d = irq_domain_get_irq_data(domain, virq);
2231 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2232 int i;
2233
2234 for (i = 0; i < nr_irqs; i++) {
2235 struct irq_data *data = irq_domain_get_irq_data(domain,
2236 virq + i);
Marc Zyngieraca268d2014-12-12 10:51:23 +00002237 u32 event = its_get_event_id(data);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002238
2239 /* Mark interrupt index as unused */
Marc Zyngier591e5be2015-07-17 10:46:42 +01002240 clear_bit(event, its_dev->event_map.lpi_map);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002241
2242 /* Nuke the entry in the domain */
Marc Zyngier2da39942014-12-12 10:51:22 +00002243 irq_domain_reset_irq_data(data);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002244 }
2245
2246 /* If all interrupts have been freed, start mopping the floor */
Marc Zyngier591e5be2015-07-17 10:46:42 +01002247 if (bitmap_empty(its_dev->event_map.lpi_map,
2248 its_dev->event_map.nr_lpis)) {
Marc Zyngiercf2be8b2016-12-19 18:49:59 +00002249 its_lpi_free_chunks(its_dev->event_map.lpi_map,
2250 its_dev->event_map.lpi_base,
2251 its_dev->event_map.nr_lpis);
2252 kfree(its_dev->event_map.col_map);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002253
2254 /* Unmap device/itt */
2255 its_send_mapd(its_dev, 0);
2256 its_free_device(its_dev);
2257 }
2258
2259 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
2260}
2261
2262static const struct irq_domain_ops its_domain_ops = {
2263 .alloc = its_irq_domain_alloc,
2264 .free = its_irq_domain_free,
Marc Zyngieraca268d2014-12-12 10:51:23 +00002265 .activate = its_irq_domain_activate,
2266 .deactivate = its_irq_domain_deactivate,
Marc Zyngierb48ac832014-11-24 14:35:16 +00002267};
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00002268
Marc Zyngier20b3d542016-12-20 15:23:22 +00002269/*
2270 * This is insane.
2271 *
2272 * If a GICv4 doesn't implement Direct LPIs (which is extremely
2273 * likely), the only way to perform an invalidate is to use a fake
2274 * device to issue an INV command, implying that the LPI has first
2275 * been mapped to some event on that device. Since this is not exactly
2276 * cheap, we try to keep that mapping around as long as possible, and
2277 * only issue an UNMAP if we're short on available slots.
2278 *
2279 * Broken by design(tm).
2280 */
2281static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
2282{
2283 /* Already unmapped? */
2284 if (vpe->vpe_proxy_event == -1)
2285 return;
2286
2287 its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event);
2288 vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL;
2289
2290 /*
2291 * We don't track empty slots at all, so let's move the
2292 * next_victim pointer if we can quickly reuse that slot
2293 * instead of nuking an existing entry. Not clear that this is
2294 * always a win though, and this might just generate a ripple
2295 * effect... Let's just hope VPEs don't migrate too often.
2296 */
2297 if (vpe_proxy.vpes[vpe_proxy.next_victim])
2298 vpe_proxy.next_victim = vpe->vpe_proxy_event;
2299
2300 vpe->vpe_proxy_event = -1;
2301}
2302
2303static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
2304{
2305 if (!gic_rdists->has_direct_lpi) {
2306 unsigned long flags;
2307
2308 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2309 its_vpe_db_proxy_unmap_locked(vpe);
2310 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2311 }
2312}
2313
2314static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
2315{
2316 /* Already mapped? */
2317 if (vpe->vpe_proxy_event != -1)
2318 return;
2319
2320 /* This slot was already allocated. Kick the other VPE out. */
2321 if (vpe_proxy.vpes[vpe_proxy.next_victim])
2322 its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]);
2323
2324 /* Map the new VPE instead */
2325 vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
2326 vpe->vpe_proxy_event = vpe_proxy.next_victim;
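/* Move the round-robin victim pointer past the slot we just claimed */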
2327 vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites;
2328
2329 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx;
2330 its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event);
2331}
2332
Marc Zyngier958b90d2017-08-18 16:14:17 +01002333static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
2334{
2335 unsigned long flags;
2336 struct its_collection *target_col;
2337
2338 if (gic_rdists->has_direct_lpi) {
2339 void __iomem *rdbase;
2340
2341 rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base;
2342 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
2343 while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
2344 cpu_relax();
2345
2346 return;
2347 }
2348
2349 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2350
2351 its_vpe_db_proxy_map_locked(vpe);
2352
2353 target_col = &vpe_proxy.dev->its->collections[to];
2354 its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event);
2355 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to;
2356
2357 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2358}
2359
Marc Zyngier3171a472016-12-20 15:17:28 +00002360static int its_vpe_set_affinity(struct irq_data *d,
2361 const struct cpumask *mask_val,
2362 bool force)
2363{
2364 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2365 int cpu = cpumask_first(mask_val);
2366
2367 /*
2368 * Changing affinity is mega expensive, so let's be as lazy as
Marc Zyngier20b3d542016-12-20 15:23:22 +00002369 * we can and only do it if we really have to. Also, if mapped
Marc Zyngier958b90d2017-08-18 16:14:17 +01002370 * into the proxy device, we need to move the doorbell
2371 * interrupt to its new location.
Marc Zyngier3171a472016-12-20 15:17:28 +00002372 */
2373 if (vpe->col_idx != cpu) {
Marc Zyngier958b90d2017-08-18 16:14:17 +01002374 int from = vpe->col_idx;
2375
Marc Zyngier3171a472016-12-20 15:17:28 +00002376 vpe->col_idx = cpu;
2377 its_send_vmovp(vpe);
Marc Zyngier958b90d2017-08-18 16:14:17 +01002378 its_vpe_db_proxy_move(vpe, from, cpu);
Marc Zyngier3171a472016-12-20 15:17:28 +00002379 }
2380
2381 return IRQ_SET_MASK_OK_DONE;
2382}
2383
Marc Zyngiere643d802016-12-20 15:09:31 +00002384static void its_vpe_schedule(struct its_vpe *vpe)
2385{
2386 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2387 u64 val;
2388
2389 /* Schedule the VPE */
2390 val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
2391 GENMASK_ULL(51, 12);
2392 val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
2393 val |= GICR_VPROPBASER_RaWb;
2394 val |= GICR_VPROPBASER_InnerShareable;
2395 gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2396
2397 val = virt_to_phys(page_address(vpe->vpt_page)) &
2398 GENMASK_ULL(51, 16);
2399 val |= GICR_VPENDBASER_RaWaWb;
2400 val |= GICR_VPENDBASER_NonShareable;
2401 /*
2402 * There is no good way of finding out if the pending table is
2403 * empty as we can race against the doorbell interrupt very
2404 * easily. So in the end, vpe->pending_last is only an
2405 * indication that the vcpu has something pending, not one
2406 * that the pending table is empty. A good implementation
2407 * would be able to read its coarse map pretty quickly anyway,
2408 * making this a tolerable issue.
2409 */
2410 val |= GICR_VPENDBASER_PendingLast;
2411 val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
2412 val |= GICR_VPENDBASER_Valid;
2413 gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
2414}
2415
2416static void its_vpe_deschedule(struct its_vpe *vpe)
2417{
2418 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2419 u32 count = 1000000; /* 1s! */
2420 bool clean;
2421 u64 val;
2422
2423 /* We're being scheduled out */
2424 val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
2425 val &= ~GICR_VPENDBASER_Valid;
2426 gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
2427
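/* Poll GICR_VPENDBASER.Dirty until the GIC has parked the vPE state, within the 1s budget above */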
2428 do {
2429 val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
2430 clean = !(val & GICR_VPENDBASER_Dirty);
2431 if (!clean) {
2432 count--;
2433 cpu_relax();
2434 udelay(1);
2435 }
2436 } while (!clean && count);
2437
2438 if (unlikely(!clean && !count)) {
2439 pr_err_ratelimited("ITS virtual pending table not cleaning\n");
2440 vpe->idai = false;
2441 vpe->pending_last = true;
2442 } else {
2443 vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
2444 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
2445 }
2446}
2447
2448static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
2449{
2450 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2451 struct its_cmd_info *info = vcpu_info;
2452
2453 switch (info->cmd_type) {
2454 case SCHEDULE_VPE:
2455 its_vpe_schedule(vpe);
2456 return 0;
2457
2458 case DESCHEDULE_VPE:
2459 its_vpe_deschedule(vpe);
2460 return 0;
2461
Marc Zyngier5e2f7642016-12-20 15:10:50 +00002462 case INVALL_VPE:
2463 its_send_vinvall(vpe);
2464 return 0;
2465
Marc Zyngiere643d802016-12-20 15:09:31 +00002466 default:
2467 return -EINVAL;
2468 }
2469}
2470
Marc Zyngier20b3d542016-12-20 15:23:22 +00002471static void its_vpe_send_cmd(struct its_vpe *vpe,
2472 void (*cmd)(struct its_device *, u32))
2473{
2474 unsigned long flags;
2475
2476 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2477
2478 its_vpe_db_proxy_map_locked(vpe);
2479 cmd(vpe_proxy.dev, vpe->vpe_proxy_event);
2480
2481 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2482}
2483
Marc Zyngierf6a91da2016-12-20 15:20:38 +00002484static void its_vpe_send_inv(struct irq_data *d)
2485{
2486 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
Marc Zyngierf6a91da2016-12-20 15:20:38 +00002487
Marc Zyngier20b3d542016-12-20 15:23:22 +00002488 if (gic_rdists->has_direct_lpi) {
2489 void __iomem *rdbase;
2490
2491 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
2492 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_INVLPIR);
2493 while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
2494 cpu_relax();
2495 } else {
2496 its_vpe_send_cmd(vpe, its_send_inv);
2497 }
Marc Zyngierf6a91da2016-12-20 15:20:38 +00002498}
2499
2500static void its_vpe_mask_irq(struct irq_data *d)
2501{
2502 /*
2503 * We need to mask the LPI, which is described by the parent
2504 * irq_data. Instead of calling into the parent (which won't
2505 * exactly do the right thing), let's simply use the
2506 * parent_data pointer. Yes, I'm naughty.
2507 */
2508 lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
2509 its_vpe_send_inv(d);
2510}
2511
2512static void its_vpe_unmask_irq(struct irq_data *d)
2513{
2514 /* Same hack as above... */
2515 lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
2516 its_vpe_send_inv(d);
2517}
2518
Marc Zyngiere57a3e282017-07-31 14:47:24 +01002519static int its_vpe_set_irqchip_state(struct irq_data *d,
2520 enum irqchip_irq_state which,
2521 bool state)
2522{
2523 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2524
2525 if (which != IRQCHIP_STATE_PENDING)
2526 return -EINVAL;
2527
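/* With DirectLPI, set/clear the doorbell directly in the redistributor; otherwise emulate it with INT/CLEAR commands through the proxy device */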
2528 if (gic_rdists->has_direct_lpi) {
2529 void __iomem *rdbase;
2530
2531 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
2532 if (state) {
2533 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
2534 } else {
2535 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
2536 while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
2537 cpu_relax();
2538 }
2539 } else {
2540 if (state)
2541 its_vpe_send_cmd(vpe, its_send_int);
2542 else
2543 its_vpe_send_cmd(vpe, its_send_clear);
2544 }
2545
2546 return 0;
2547}
2548
Marc Zyngier8fff27a2016-12-20 13:41:55 +00002549static struct irq_chip its_vpe_irq_chip = {
2550 .name = "GICv4-vpe",
Marc Zyngierf6a91da2016-12-20 15:20:38 +00002551 .irq_mask = its_vpe_mask_irq,
2552 .irq_unmask = its_vpe_unmask_irq,
2553 .irq_eoi = irq_chip_eoi_parent,
Marc Zyngier3171a472016-12-20 15:17:28 +00002554 .irq_set_affinity = its_vpe_set_affinity,
Marc Zyngiere57a3e282017-07-31 14:47:24 +01002555 .irq_set_irqchip_state = its_vpe_set_irqchip_state,
Marc Zyngiere643d802016-12-20 15:09:31 +00002556 .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity,
Marc Zyngier8fff27a2016-12-20 13:41:55 +00002557};
2558
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00002559static int its_vpe_id_alloc(void)
2560{
Shanker Donthineni32bd44d2017-10-07 15:43:48 -05002561 return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00002562}
2563
2564static void its_vpe_id_free(u16 id)
2565{
2566 ida_simple_remove(&its_vpeid_ida, id);
2567}
2568
2569static int its_vpe_init(struct its_vpe *vpe)
2570{
2571 struct page *vpt_page;
2572 int vpe_id;
2573
2574 /* Allocate vpe_id */
2575 vpe_id = its_vpe_id_alloc();
2576 if (vpe_id < 0)
2577 return vpe_id;
2578
2579 /* Allocate VPT */
2580 vpt_page = its_allocate_pending_table(GFP_KERNEL);
2581 if (!vpt_page) {
2582 its_vpe_id_free(vpe_id);
2583 return -ENOMEM;
2584 }
2585
2586 if (!its_alloc_vpe_table(vpe_id)) {
2587 its_vpe_id_free(vpe_id);
2588 its_free_pending_table(vpt_page);
2589 return -ENOMEM;
2590 }
2591
2592 vpe->vpe_id = vpe_id;
2593 vpe->vpt_page = vpt_page;
Marc Zyngier20b3d542016-12-20 15:23:22 +00002594 vpe->vpe_proxy_event = -1;
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00002595
2596 return 0;
2597}
2598
2599static void its_vpe_teardown(struct its_vpe *vpe)
2600{
Marc Zyngier20b3d542016-12-20 15:23:22 +00002601 its_vpe_db_proxy_unmap(vpe);
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00002602 its_vpe_id_free(vpe->vpe_id);
2603 its_free_pending_table(vpe->vpt_page);
2604}
2605
2606static void its_vpe_irq_domain_free(struct irq_domain *domain,
2607 unsigned int virq,
2608 unsigned int nr_irqs)
2609{
2610 struct its_vm *vm = domain->host_data;
2611 int i;
2612
2613 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
2614
2615 for (i = 0; i < nr_irqs; i++) {
2616 struct irq_data *data = irq_domain_get_irq_data(domain,
2617 virq + i);
2618 struct its_vpe *vpe = irq_data_get_irq_chip_data(data);
2619
2620 BUG_ON(vm != vpe->its_vm);
2621
2622 clear_bit(data->hwirq, vm->db_bitmap);
2623 its_vpe_teardown(vpe);
2624 irq_domain_reset_irq_data(data);
2625 }
2626
2627 if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) {
2628 its_lpi_free_chunks(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
2629 its_free_prop_table(vm->vprop_page);
2630 }
2631}
2632
2633static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
2634 unsigned int nr_irqs, void *args)
2635{
2636 struct its_vm *vm = args;
2637 unsigned long *bitmap;
2638 struct page *vprop_page;
2639 int base, nr_ids, i, err = 0;
2640
2641 BUG_ON(!vm);
2642
2643 bitmap = its_lpi_alloc_chunks(nr_irqs, &base, &nr_ids);
2644 if (!bitmap)
2645 return -ENOMEM;
2646
2647 if (nr_ids < nr_irqs) {
2648 its_lpi_free_chunks(bitmap, base, nr_ids);
2649 return -ENOMEM;
2650 }
2651
2652 vprop_page = its_allocate_prop_table(GFP_KERNEL);
2653 if (!vprop_page) {
2654 its_lpi_free_chunks(bitmap, base, nr_ids);
2655 return -ENOMEM;
2656 }
2657
2658 vm->db_bitmap = bitmap;
2659 vm->db_lpi_base = base;
2660 vm->nr_db_lpis = nr_ids;
2661 vm->vprop_page = vprop_page;
2662
2663 for (i = 0; i < nr_irqs; i++) {
2664 vm->vpes[i]->vpe_db_lpi = base + i;
2665 err = its_vpe_init(vm->vpes[i]);
2666 if (err)
2667 break;
2668 err = its_irq_gic_domain_alloc(domain, virq + i,
2669 vm->vpes[i]->vpe_db_lpi);
2670 if (err)
2671 break;
2672 irq_domain_set_hwirq_and_chip(domain, virq + i, i,
2673 &its_vpe_irq_chip, vm->vpes[i]);
2674 set_bit(i, bitmap);
2675 }
2676
2677 if (err) {
2678 if (i > 0)
2679 its_vpe_irq_domain_free(domain, virq, i);
2680
2681 its_lpi_free_chunks(bitmap, base, nr_ids);
2682 its_free_prop_table(vprop_page);
2683 }
2684
2685 return err;
2686}
2687
Marc Zyngiereb781922016-12-20 14:47:05 +00002688static void its_vpe_irq_domain_activate(struct irq_domain *domain,
2689 struct irq_data *d)
2690{
2691 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2692
2693 /* Map the VPE to the first possible CPU */
2694 vpe->col_idx = cpumask_first(cpu_online_mask);
2695 its_send_vmapp(vpe, true);
2696 its_send_vinvall(vpe);
2697}
2698
2699static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
2700 struct irq_data *d)
2701{
2702 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2703
2704 its_send_vmapp(vpe, false);
2705}
2706
Marc Zyngier8fff27a2016-12-20 13:41:55 +00002707static const struct irq_domain_ops its_vpe_domain_ops = {
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00002708 .alloc = its_vpe_irq_domain_alloc,
2709 .free = its_vpe_irq_domain_free,
Marc Zyngiereb781922016-12-20 14:47:05 +00002710 .activate = its_vpe_irq_domain_activate,
2711 .deactivate = its_vpe_irq_domain_deactivate,
Marc Zyngier8fff27a2016-12-20 13:41:55 +00002712};
2713
Yun Wu4559fbb2015-03-06 16:37:50 +00002714static int its_force_quiescent(void __iomem *base)
2715{
2716 u32 count = 1000000; /* 1s */
2717 u32 val;
2718
2719 val = readl_relaxed(base + GITS_CTLR);
David Daney7611da82016-08-18 15:41:58 -07002720 /*
2721 * The GIC architecture specification requires the ITS to be
2722 * both disabled and quiescent; otherwise, writes to
2723 * GITS_BASER<n> or GITS_CBASER have UNPREDICTABLE results.
2724 */
2725 if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE))
Yun Wu4559fbb2015-03-06 16:37:50 +00002726 return 0;
2727
2728 /* Disable the generation of all interrupts to this ITS */
Marc Zyngierd51c4b42017-06-27 21:24:25 +01002729 val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe);
Yun Wu4559fbb2015-03-06 16:37:50 +00002730 writel_relaxed(val, base + GITS_CTLR);
2731
2732 /* Poll GITS_CTLR and wait until ITS becomes quiescent */
2733 while (1) {
2734 val = readl_relaxed(base + GITS_CTLR);
2735 if (val & GITS_CTLR_QUIESCENT)
2736 return 0;
2737
2738 count--;
2739 if (!count)
2740 return -EBUSY;
2741
2742 cpu_relax();
2743 udelay(1);
2744 }
2745}
2746
Robert Richter94100972015-09-21 22:58:38 +02002747static void __maybe_unused its_enable_quirk_cavium_22375(void *data)
2748{
2749 struct its_node *its = data;
2750
2751 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
2752}
2753
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +02002754static void __maybe_unused its_enable_quirk_cavium_23144(void *data)
2755{
2756 struct its_node *its = data;
2757
2758 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
2759}
2760
Shanker Donthineni90922a22017-03-07 08:20:38 -06002761static void __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
2762{
2763 struct its_node *its = data;
2764
2765 /* On QDF2400, the size of the ITE is 16 bytes */
2766 its->ite_size = 16;
2767}
2768
Robert Richter67510cc2015-09-21 22:58:37 +02002769static const struct gic_quirk its_quirks[] = {
Robert Richter94100972015-09-21 22:58:38 +02002770#ifdef CONFIG_CAVIUM_ERRATUM_22375
2771 {
2772 .desc = "ITS: Cavium errata 22375, 24313",
2773 .iidr = 0xa100034c, /* ThunderX pass 1.x */
2774 .mask = 0xffff0fff,
2775 .init = its_enable_quirk_cavium_22375,
2776 },
2777#endif
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +02002778#ifdef CONFIG_CAVIUM_ERRATUM_23144
2779 {
2780 .desc = "ITS: Cavium erratum 23144",
2781 .iidr = 0xa100034c, /* ThunderX pass 1.x */
2782 .mask = 0xffff0fff,
2783 .init = its_enable_quirk_cavium_23144,
2784 },
2785#endif
Shanker Donthineni90922a22017-03-07 08:20:38 -06002786#ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065
2787 {
2788 .desc = "ITS: QDF2400 erratum 0065",
2789 .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */
2790 .mask = 0xffffffff,
2791 .init = its_enable_quirk_qdf2400_e0065,
2792 },
2793#endif
Robert Richter67510cc2015-09-21 22:58:37 +02002794 {
2795 }
2796};
2797
2798static void its_enable_quirks(struct its_node *its)
2799{
2800 u32 iidr = readl_relaxed(its->base + GITS_IIDR);
2801
2802 gic_enable_quirks(iidr, its_quirks, its);
2803}
2804
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02002805static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
Tomasz Nowickid14ae5e2016-09-12 20:32:23 +02002806{
2807 struct irq_domain *inner_domain;
2808 struct msi_domain_info *info;
2809
2810 info = kzalloc(sizeof(*info), GFP_KERNEL);
2811 if (!info)
2812 return -ENOMEM;
2813
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02002814 inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its);
Tomasz Nowickid14ae5e2016-09-12 20:32:23 +02002815 if (!inner_domain) {
2816 kfree(info);
2817 return -ENOMEM;
2818 }
2819
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02002820 inner_domain->parent = its_parent;
Marc Zyngier96f0d932017-06-22 11:42:50 +01002821 irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
Eric Auger59768522017-01-19 20:58:00 +00002822 inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_REMAP;
Tomasz Nowickid14ae5e2016-09-12 20:32:23 +02002823 info->ops = &its_msi_domain_ops;
2824 info->data = its;
2825 inner_domain->host_data = info;
2826
2827 return 0;
2828}
2829
Marc Zyngier8fff27a2016-12-20 13:41:55 +00002830static int its_init_vpe_domain(void)
2831{
Marc Zyngier20b3d542016-12-20 15:23:22 +00002832 struct its_node *its;
2833 u32 devid;
2834 int entries;
2835
2836 if (gic_rdists->has_direct_lpi) {
2837 pr_info("ITS: Using DirectLPI for VPE invalidation\n");
2838 return 0;
2839 }
2840
2841 /* Any ITS will do, even if not v4 */
2842 its = list_first_entry(&its_nodes, struct its_node, entry);
2843
2844 entries = roundup_pow_of_two(nr_cpu_ids);
2845 vpe_proxy.vpes = kzalloc(sizeof(*vpe_proxy.vpes) * entries,
2846 GFP_KERNEL);
2847 if (!vpe_proxy.vpes) {
2848 pr_err("ITS: Can't allocate GICv4 proxy device array\n");
2849 return -ENOMEM;
2850 }
2851
2852 /* Use the last possible DevID */
2853 devid = GENMASK(its->device_ids - 1, 0);
2854 vpe_proxy.dev = its_create_device(its, devid, entries, false);
2855 if (!vpe_proxy.dev) {
2856 kfree(vpe_proxy.vpes);
2857 pr_err("ITS: Can't allocate GICv4 proxy device\n");
2858 return -ENOMEM;
2859 }
2860
Shanker Donthinenic427a472017-09-23 13:50:19 -05002861 BUG_ON(entries > vpe_proxy.dev->nr_ites);
Marc Zyngier20b3d542016-12-20 15:23:22 +00002862
2863 raw_spin_lock_init(&vpe_proxy.lock);
2864 vpe_proxy.next_victim = 0;
2865 pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n",
2866 devid, vpe_proxy.dev->nr_ites);
2867
Marc Zyngier8fff27a2016-12-20 13:41:55 +00002868 return 0;
2869}
2870
Marc Zyngier3dfa5762016-12-19 17:25:54 +00002871static int __init its_compute_its_list_map(struct resource *res,
2872 void __iomem *its_base)
2873{
2874 int its_number;
2875 u32 ctlr;
2876
2877 /*
2878 * This is assumed to be done early enough that we're
2879 * guaranteed to be single-threaded, hence no
2880 * locking. Should this change, we should address
2881 * this.
2882 */
2883 its_number = find_first_zero_bit(&its_list_map, ITS_LIST_MAX);
2884 if (its_number >= ITS_LIST_MAX) {
2885 pr_err("ITS@%pa: No ITSList entry available!\n",
2886 &res->start);
2887 return -EINVAL;
2888 }
2889
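/* Program our chosen slot; if the ITSList field turns out to be read-only, adopt whatever reads back */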
2890 ctlr = readl_relaxed(its_base + GITS_CTLR);
2891 ctlr &= ~GITS_CTLR_ITS_NUMBER;
2892 ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT;
2893 writel_relaxed(ctlr, its_base + GITS_CTLR);
2894 ctlr = readl_relaxed(its_base + GITS_CTLR);
2895 if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) {
2896 its_number = ctlr & GITS_CTLR_ITS_NUMBER;
2897 its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT;
2898 }
2899
2900 if (test_and_set_bit(its_number, &its_list_map)) {
2901 pr_err("ITS@%pa: Duplicate ITSList entry %d\n",
2902 &res->start, its_number);
2903 return -EINVAL;
2904 }
2905
2906 return its_number;
2907}
2908
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02002909static int __init its_probe_one(struct resource *res,
2910 struct fwnode_handle *handle, int numa_node)
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00002911{
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00002912 struct its_node *its;
2913 void __iomem *its_base;
Marc Zyngier3dfa5762016-12-19 17:25:54 +00002914 u32 val, ctlr;
2915 u64 baser, tmp, typer;
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00002916 int err;
2917
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02002918 its_base = ioremap(res->start, resource_size(res));
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00002919 if (!its_base) {
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02002920 pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00002921 return -ENOMEM;
2922 }
2923
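/* Only the GICv3 (0x30) and GICv4 (0x40) architecture revisions are acceptable */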
2924 val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
2925 if (val != 0x30 && val != 0x40) {
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02002926 pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00002927 err = -ENODEV;
2928 goto out_unmap;
2929 }
2930
Yun Wu4559fbb2015-03-06 16:37:50 +00002931 err = its_force_quiescent(its_base);
2932 if (err) {
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02002933 pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start);
Yun Wu4559fbb2015-03-06 16:37:50 +00002934 goto out_unmap;
2935 }
2936
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02002937 pr_info("ITS %pR\n", res);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00002938
2939 its = kzalloc(sizeof(*its), GFP_KERNEL);
2940 if (!its) {
2941 err = -ENOMEM;
2942 goto out_unmap;
2943 }
2944
2945 raw_spin_lock_init(&its->lock);
2946 INIT_LIST_HEAD(&its->entry);
2947 INIT_LIST_HEAD(&its->its_device_list);
Marc Zyngier3dfa5762016-12-19 17:25:54 +00002948 typer = gic_read_typer(its_base + GITS_TYPER);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00002949 its->base = its_base;
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02002950 its->phys_base = res->start;
Marc Zyngier3dfa5762016-12-19 17:25:54 +00002951 its->ite_size = GITS_TYPER_ITT_ENTRY_SIZE(typer);
2952 its->is_v4 = !!(typer & GITS_TYPER_VLPIS);
2953 if (its->is_v4) {
2954 if (!(typer & GITS_TYPER_VMOVP)) {
2955 err = its_compute_its_list_map(res, its_base);
2956 if (err < 0)
2957 goto out_free_its;
2958
2959 pr_info("ITS@%pa: Using ITS number %d\n",
2960 &res->start, err);
2961 } else {
2962 pr_info("ITS@%pa: Single VMOVP capable\n", &res->start);
2963 }
2964 }
2965
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02002966 its->numa_node = numa_node;
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00002967
Robert Richter5bc13c22017-02-01 18:38:25 +01002968 its->cmd_base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
2969 get_order(ITS_CMD_QUEUE_SZ));
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00002970 if (!its->cmd_base) {
2971 err = -ENOMEM;
2972 goto out_free_its;
2973 }
2974 its->cmd_write = its->cmd_base;
2975
Robert Richter67510cc2015-09-21 22:58:37 +02002976 its_enable_quirks(its);
2977
Shanker Donthineni0e0b0f62016-06-06 18:17:31 -05002978 err = its_alloc_tables(its);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00002979 if (err)
2980 goto out_free_cmd;
2981
2982 err = its_alloc_collections(its);
2983 if (err)
2984 goto out_free_tables;
2985
2986 baser = (virt_to_phys(its->cmd_base) |
Shanker Donthineni2fd632a2017-01-25 21:51:41 -06002987 GITS_CBASER_RaWaWb |
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00002988 GITS_CBASER_InnerShareable |
2989 (ITS_CMD_QUEUE_SZ / SZ_4K - 1) |
2990 GITS_CBASER_VALID);
2991
Vladimir Murzin0968a612016-11-02 11:54:06 +00002992 gits_write_cbaser(baser, its->base + GITS_CBASER);
2993 tmp = gits_read_cbaser(its->base + GITS_CBASER);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00002994
Marc Zyngier4ad3e362015-03-27 14:15:04 +00002995 if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
Marc Zyngier241a3862015-03-27 14:15:05 +00002996 if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
2997 /*
2998 * The HW reports non-shareable, so we must
2999 * remove the cacheability attributes as
3000 * well.
3001 */
3002 baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
3003 GITS_CBASER_CACHEABILITY_MASK);
3004 baser |= GITS_CBASER_nC;
Vladimir Murzin0968a612016-11-02 11:54:06 +00003005 gits_write_cbaser(baser, its->base + GITS_CBASER);
Marc Zyngier241a3862015-03-27 14:15:05 +00003006 }
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003007 pr_info("ITS: using cache flushing for cmd queue\n");
3008 its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
3009 }
3010
Vladimir Murzin0968a612016-11-02 11:54:06 +00003011 gits_write_cwriter(0, its->base + GITS_CWRITER);
Marc Zyngier3dfa5762016-12-19 17:25:54 +00003012 ctlr = readl_relaxed(its->base + GITS_CTLR);
Marc Zyngierd51c4b42017-06-27 21:24:25 +01003013 ctlr |= GITS_CTLR_ENABLE;
3014 if (its->is_v4)
3015 ctlr |= GITS_CTLR_ImDe;
3016 writel_relaxed(ctlr, its->base + GITS_CTLR);
Marc Zyngier241a3862015-03-27 14:15:05 +00003017
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003018 err = its_init_domain(handle, its);
Tomasz Nowickid14ae5e2016-09-12 20:32:23 +02003019 if (err)
3020 goto out_free_tables;
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003021
3022 spin_lock(&its_lock);
3023 list_add(&its->entry, &its_nodes);
3024 spin_unlock(&its_lock);
3025
3026 return 0;
3027
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003028out_free_tables:
3029 its_free_tables(its);
3030out_free_cmd:
Robert Richter5bc13c22017-02-01 18:38:25 +01003031 free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003032out_free_its:
3033 kfree(its);
3034out_unmap:
3035 iounmap(its_base);
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003036 pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003037 return err;
3038}
3039
3040static bool gic_rdists_supports_plpis(void)
3041{
Marc Zyngier589ce5f2016-10-14 15:13:07 +01003042 return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003043}
3044
3045int its_cpu_init(void)
3046{
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003047 if (!list_empty(&its_nodes)) {
Vladimir Murzin16acae72015-03-06 16:37:40 +00003048 if (!gic_rdists_supports_plpis()) {
3049 pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
3050 return -ENXIO;
3051 }
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003052 its_cpu_init_lpis();
3053 its_cpu_init_collection();
3054 }
3055
3056 return 0;
3057}
3058
Arvind Yadav935bba72017-06-22 16:05:30 +05303059static const struct of_device_id its_device_id[] = {
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003060 { .compatible = "arm,gic-v3-its", },
3061 {},
3062};
3063
static int __init its_of_probe(struct device_node *node)
{
	struct device_node *np;
	struct resource res;

	for (np = of_find_matching_node(node, its_device_id); np;
	     np = of_find_matching_node(np, its_device_id)) {
		if (!of_property_read_bool(np, "msi-controller")) {
			pr_warn("%pOF: no msi-controller property, ITS ignored\n",
				np);
			continue;
		}

		if (of_address_to_resource(np, 0, &res)) {
			pr_warn("%pOF: no regs?\n", np);
			continue;
		}

		its_probe_one(&res, &np->fwnode, of_node_to_nid(np));
	}
	return 0;
}
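
/*
 * For reference, a node accepted by the loop above would look roughly
 * like this (illustrative sketch only; the base address and size are
 * made up, not taken from any real platform):
 *
 *	its: msi-controller@2f020000 {
 *		compatible = "arm,gic-v3-its";
 *		msi-controller;
 *		reg = <0x0 0x2f020000 0x0 0x20000>;
 *	};
 */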

#ifdef CONFIG_ACPI

#define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K)

#ifdef CONFIG_ACPI_NUMA
struct its_srat_map {
	/* numa node id */
	u32	numa_node;
	/* GIC ITS ID */
	u32	its_id;
};

static struct its_srat_map *its_srat_maps __initdata;
static int its_in_srat __initdata;

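/* Map an ITS ID to the NUMA node recorded for it while parsing SRAT. */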
static int __init acpi_get_its_numa_node(u32 its_id)
{
	int i;

	for (i = 0; i < its_in_srat; i++) {
		if (its_id == its_srat_maps[i].its_id)
			return its_srat_maps[i].numa_node;
	}
	return NUMA_NO_NODE;
}

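/*
 * Counting callback: by returning 0 for every GIC ITS affinity entry,
 * acpi_table_parse_entries() ends up reporting how many such entries
 * the SRAT contains, which sizes the its_srat_maps allocation below.
 */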
static int __init gic_acpi_match_srat_its(struct acpi_subtable_header *header,
					  const unsigned long end)
{
	return 0;
}

static int __init gic_acpi_parse_srat_its(struct acpi_subtable_header *header,
					  const unsigned long end)
{
	int node;
	struct acpi_srat_gic_its_affinity *its_affinity;

	its_affinity = (struct acpi_srat_gic_its_affinity *)header;
	if (!its_affinity)
		return -EINVAL;

	if (its_affinity->header.length < sizeof(*its_affinity)) {
		pr_err("SRAT: Invalid header length %d in ITS affinity\n",
			its_affinity->header.length);
		return -EINVAL;
	}

	node = acpi_map_pxm_to_node(its_affinity->proximity_domain);

	if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
		pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node);
		return 0;
	}

	its_srat_maps[its_in_srat].numa_node = node;
	its_srat_maps[its_in_srat].its_id = its_affinity->its_id;
	its_in_srat++;
	pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n",
		its_affinity->proximity_domain, its_affinity->its_id, node);

	return 0;
}

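/*
 * Two-pass SRAT scan: first count the GIC ITS affinity entries, then
 * allocate its_srat_maps and fill it in with a second walk.
 */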
static void __init acpi_table_parse_srat_its(void)
{
	int count;

	count = acpi_table_parse_entries(ACPI_SIG_SRAT,
			sizeof(struct acpi_table_srat),
			ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
			gic_acpi_match_srat_its, 0);
	if (count <= 0)
		return;

	its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map),
				      GFP_KERNEL);
	if (!its_srat_maps) {
		pr_warn("SRAT: Failed to allocate memory for its_srat_maps!\n");
		return;
	}

	acpi_table_parse_entries(ACPI_SIG_SRAT,
			sizeof(struct acpi_table_srat),
			ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
			gic_acpi_parse_srat_its, 0);
}

/* Free the its_srat_maps once ITS probing is done. */
static void __init acpi_its_srat_maps_free(void)
{
	kfree(its_srat_maps);
}
#else
static void __init acpi_table_parse_srat_its(void)	{ }
static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; }
static void __init acpi_its_srat_maps_free(void) { }
#endif

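/*
 * Probe one ITS described by a MADT GIC translator entry: build a
 * resource covering the 128kB register frame, register a domain token
 * with IORT so that devices can later find their MSI domain, and hand
 * over to its_probe_one() with the NUMA node derived from SRAT.
 */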
static int __init gic_acpi_parse_madt_its(struct acpi_subtable_header *header,
					  const unsigned long end)
{
	struct acpi_madt_generic_translator *its_entry;
	struct fwnode_handle *dom_handle;
	struct resource res;
	int err;

	its_entry = (struct acpi_madt_generic_translator *)header;
	memset(&res, 0, sizeof(res));
	res.start = its_entry->base_address;
	res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1;
	res.flags = IORESOURCE_MEM;

	dom_handle = irq_domain_alloc_fwnode((void *)its_entry->base_address);
	if (!dom_handle) {
		pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n",
		       &res.start);
		return -ENOMEM;
	}

	err = iort_register_domain_token(its_entry->translation_id, dom_handle);
	if (err) {
		pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n",
		       &res.start, its_entry->translation_id);
		goto dom_err;
	}

	err = its_probe_one(&res, dom_handle,
			    acpi_get_its_numa_node(its_entry->translation_id));
	if (!err)
		return 0;

	iort_deregister_domain_token(its_entry->translation_id);
dom_err:
	irq_domain_free_fwnode(dom_handle);
	return err;
}

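/*
 * SRAT is parsed before the MADT walk so that gic_acpi_parse_madt_its()
 * can resolve each ITS's NUMA node; the ID-to-node map is only needed
 * during probing and is freed immediately afterwards.
 */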
static void __init its_acpi_probe(void)
{
	acpi_table_parse_srat_its();
	acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
			      gic_acpi_parse_madt_its, 0);
	acpi_its_srat_maps_free();
}
#else
static void __init its_acpi_probe(void) { }
#endif

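/*
 * Entry point from the GICv3 core: discover all ITSs (via DT or ACPI),
 * allocate the global LPI tables, and enable GICv4 VLPI support only
 * when at least one v4 ITS exists and the redistributors support VLPIs.
 */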
int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
		    struct irq_domain *parent_domain)
{
	struct device_node *of_node;
	struct its_node *its;
	bool has_v4 = false;
	int err;

	its_parent = parent_domain;
	of_node = to_of_node(handle);
	if (of_node)
		its_of_probe(of_node);
	else
		its_acpi_probe();

	if (list_empty(&its_nodes)) {
		pr_warn("ITS: No ITS available, not enabling LPIs\n");
		return -ENXIO;
	}

	gic_rdists = rdists;
	err = its_alloc_lpi_tables();
	if (err)
		return err;

	list_for_each_entry(its, &its_nodes, entry)
		has_v4 |= its->is_v4;

	if (has_v4 && rdists->has_vlpis) {
		if (its_init_vpe_domain() ||
		    its_init_v4(parent_domain, &its_vpe_domain_ops)) {
			rdists->has_vlpis = false;
			pr_err("ITS: Disabling GICv4 support\n");
		}
	}

	return 0;
}