/*
 * IOMMU API for ARM architected SMMUv3 implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2015 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver is powered by bad coffee and bombay mix.
 */

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* MMIO registers */
#define ARM_SMMU_IDR0			0x0
#define IDR0_ST_LVL_SHIFT		27
#define IDR0_ST_LVL_MASK		0x3
#define IDR0_ST_LVL_2LVL		(1 << IDR0_ST_LVL_SHIFT)
#define IDR0_STALL_MODEL_SHIFT		24
#define IDR0_STALL_MODEL_MASK		0x3
#define IDR0_STALL_MODEL_STALL		(0 << IDR0_STALL_MODEL_SHIFT)
#define IDR0_STALL_MODEL_FORCE		(2 << IDR0_STALL_MODEL_SHIFT)
#define IDR0_TTENDIAN_SHIFT		21
#define IDR0_TTENDIAN_MASK		0x3
#define IDR0_TTENDIAN_LE		(2 << IDR0_TTENDIAN_SHIFT)
#define IDR0_TTENDIAN_BE		(3 << IDR0_TTENDIAN_SHIFT)
#define IDR0_TTENDIAN_MIXED		(0 << IDR0_TTENDIAN_SHIFT)
#define IDR0_CD2L			(1 << 19)
#define IDR0_VMID16			(1 << 18)
#define IDR0_PRI			(1 << 16)
#define IDR0_SEV			(1 << 14)
#define IDR0_MSI			(1 << 13)
#define IDR0_ASID16			(1 << 12)
#define IDR0_ATS			(1 << 10)
#define IDR0_HYP			(1 << 9)
#define IDR0_COHACC			(1 << 4)
#define IDR0_TTF_SHIFT			2
#define IDR0_TTF_MASK			0x3
#define IDR0_TTF_AARCH64		(2 << IDR0_TTF_SHIFT)
#define IDR0_TTF_AARCH32_64		(3 << IDR0_TTF_SHIFT)
#define IDR0_S1P			(1 << 1)
#define IDR0_S2P			(1 << 0)

#define ARM_SMMU_IDR1			0x4
#define IDR1_TABLES_PRESET		(1 << 30)
#define IDR1_QUEUES_PRESET		(1 << 29)
#define IDR1_REL			(1 << 28)
#define IDR1_CMDQ_SHIFT			21
#define IDR1_CMDQ_MASK			0x1f
#define IDR1_EVTQ_SHIFT			16
#define IDR1_EVTQ_MASK			0x1f
#define IDR1_PRIQ_SHIFT			11
#define IDR1_PRIQ_MASK			0x1f
#define IDR1_SSID_SHIFT			6
#define IDR1_SSID_MASK			0x1f
#define IDR1_SID_SHIFT			0
#define IDR1_SID_MASK			0x3f

#define ARM_SMMU_IDR5			0x14
#define IDR5_STALL_MAX_SHIFT		16
#define IDR5_STALL_MAX_MASK		0xffff
#define IDR5_GRAN64K			(1 << 6)
#define IDR5_GRAN16K			(1 << 5)
#define IDR5_GRAN4K			(1 << 4)
#define IDR5_OAS_SHIFT			0
#define IDR5_OAS_MASK			0x7
#define IDR5_OAS_32_BIT			(0 << IDR5_OAS_SHIFT)
#define IDR5_OAS_36_BIT			(1 << IDR5_OAS_SHIFT)
#define IDR5_OAS_40_BIT			(2 << IDR5_OAS_SHIFT)
#define IDR5_OAS_42_BIT			(3 << IDR5_OAS_SHIFT)
#define IDR5_OAS_44_BIT			(4 << IDR5_OAS_SHIFT)
#define IDR5_OAS_48_BIT			(5 << IDR5_OAS_SHIFT)

#define ARM_SMMU_CR0			0x20
#define CR0_CMDQEN			(1 << 3)
#define CR0_EVTQEN			(1 << 2)
#define CR0_PRIQEN			(1 << 1)
#define CR0_SMMUEN			(1 << 0)

#define ARM_SMMU_CR0ACK			0x24

#define ARM_SMMU_CR1			0x28
#define CR1_SH_NSH			0
#define CR1_SH_OSH			2
#define CR1_SH_ISH			3
#define CR1_CACHE_NC			0
#define CR1_CACHE_WB			1
#define CR1_CACHE_WT			2
#define CR1_TABLE_SH_SHIFT		10
#define CR1_TABLE_OC_SHIFT		8
#define CR1_TABLE_IC_SHIFT		6
#define CR1_QUEUE_SH_SHIFT		4
#define CR1_QUEUE_OC_SHIFT		2
#define CR1_QUEUE_IC_SHIFT		0

#define ARM_SMMU_CR2			0x2c
#define CR2_PTM				(1 << 2)
#define CR2_RECINVSID			(1 << 1)
#define CR2_E2H				(1 << 0)

#define ARM_SMMU_GBPA			0x44
#define GBPA_ABORT			(1 << 20)
#define GBPA_UPDATE			(1 << 31)

#define ARM_SMMU_IRQ_CTRL		0x50
#define IRQ_CTRL_EVTQ_IRQEN		(1 << 2)
#define IRQ_CTRL_PRIQ_IRQEN		(1 << 1)
#define IRQ_CTRL_GERROR_IRQEN		(1 << 0)

#define ARM_SMMU_IRQ_CTRLACK		0x54

#define ARM_SMMU_GERROR			0x60
#define GERROR_SFM_ERR			(1 << 8)
#define GERROR_MSI_GERROR_ABT_ERR	(1 << 7)
#define GERROR_MSI_PRIQ_ABT_ERR		(1 << 6)
#define GERROR_MSI_EVTQ_ABT_ERR		(1 << 5)
#define GERROR_MSI_CMDQ_ABT_ERR		(1 << 4)
#define GERROR_PRIQ_ABT_ERR		(1 << 3)
#define GERROR_EVTQ_ABT_ERR		(1 << 2)
#define GERROR_CMDQ_ERR			(1 << 0)
#define GERROR_ERR_MASK			0xfd

#define ARM_SMMU_GERRORN		0x64

#define ARM_SMMU_GERROR_IRQ_CFG0	0x68
#define ARM_SMMU_GERROR_IRQ_CFG1	0x70
#define ARM_SMMU_GERROR_IRQ_CFG2	0x74

#define ARM_SMMU_STRTAB_BASE		0x80
#define STRTAB_BASE_RA			(1UL << 62)
#define STRTAB_BASE_ADDR_MASK		GENMASK_ULL(47, 6)

#define ARM_SMMU_STRTAB_BASE_CFG	0x88
#define STRTAB_BASE_CFG_LOG2SIZE_SHIFT	0
#define STRTAB_BASE_CFG_LOG2SIZE_MASK	0x3f
#define STRTAB_BASE_CFG_SPLIT_SHIFT	6
#define STRTAB_BASE_CFG_SPLIT_MASK	0x1f
#define STRTAB_BASE_CFG_FMT_SHIFT	16
#define STRTAB_BASE_CFG_FMT_MASK	0x3
#define STRTAB_BASE_CFG_FMT_LINEAR	(0 << STRTAB_BASE_CFG_FMT_SHIFT)
#define STRTAB_BASE_CFG_FMT_2LVL	(1 << STRTAB_BASE_CFG_FMT_SHIFT)

#define ARM_SMMU_CMDQ_BASE		0x90
#define ARM_SMMU_CMDQ_PROD		0x98
#define ARM_SMMU_CMDQ_CONS		0x9c

#define ARM_SMMU_EVTQ_BASE		0xa0
#define ARM_SMMU_EVTQ_PROD		0x100a8
#define ARM_SMMU_EVTQ_CONS		0x100ac
#define ARM_SMMU_EVTQ_IRQ_CFG0		0xb0
#define ARM_SMMU_EVTQ_IRQ_CFG1		0xb8
#define ARM_SMMU_EVTQ_IRQ_CFG2		0xbc

#define ARM_SMMU_PRIQ_BASE		0xc0
#define ARM_SMMU_PRIQ_PROD		0x100c8
#define ARM_SMMU_PRIQ_CONS		0x100cc
#define ARM_SMMU_PRIQ_IRQ_CFG0		0xd0
#define ARM_SMMU_PRIQ_IRQ_CFG1		0xd8
#define ARM_SMMU_PRIQ_IRQ_CFG2		0xdc

/* Common MSI config fields */
#define MSI_CFG0_ADDR_MASK		GENMASK_ULL(47, 2)
#define MSI_CFG2_SH_SHIFT		4
#define MSI_CFG2_SH_NSH			(0UL << MSI_CFG2_SH_SHIFT)
#define MSI_CFG2_SH_OSH			(2UL << MSI_CFG2_SH_SHIFT)
#define MSI_CFG2_SH_ISH			(3UL << MSI_CFG2_SH_SHIFT)
#define MSI_CFG2_MEMATTR_SHIFT		0
#define MSI_CFG2_MEMATTR_DEVICE_nGnRE	(0x1 << MSI_CFG2_MEMATTR_SHIFT)

#define Q_IDX(q, p)			((p) & ((1 << (q)->max_n_shift) - 1))
#define Q_WRP(q, p)			((p) & (1 << (q)->max_n_shift))
#define Q_OVERFLOW_FLAG			(1 << 31)
#define Q_OVF(q, p)			((p) & Q_OVERFLOW_FLAG)
#define Q_ENT(q, p)			((q)->base +			\
					 Q_IDX(q, p) * (q)->ent_dwords)

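/*
 * Worked example of the wrap-bit scheme above (max_n_shift == 8 assumed
 * purely for illustration, i.e. a 256-entry queue): prod == 0x0ff gives
 * Q_IDX == 0xff and Q_WRP == 0; the next increment yields prod == 0x100,
 * wrapping the index back to 0 and toggling the wrap bit. queue_empty()
 * below is therefore "same index, same wrap bit", while queue_full() is
 * "same index, different wrap bits".
 */
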
#define Q_BASE_RWA			(1UL << 62)
#define Q_BASE_ADDR_MASK		GENMASK_ULL(47, 5)
#define Q_BASE_LOG2SIZE_SHIFT		0
#define Q_BASE_LOG2SIZE_MASK		0x1fUL

/*
 * Stream table.
 *
 * Linear: Enough to cover 1 << IDR1.SIDSIZE entries
 * 2lvl: 128k L1 entries,
 *       256 lazy entries per table (each table covers a PCI bus)
 */
#define STRTAB_L1_SZ_SHIFT		20
#define STRTAB_SPLIT			8
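
/*
 * Illustration of the split (SID value chosen for illustration, following
 * arm_smmu_init_l2_strtab() below): with STRTAB_SPLIT == 8, SID 0x1234
 * selects L1 descriptor 0x12 (sid >> STRTAB_SPLIT) and entry 0x34 within
 * the 256-entry L2 table. For PCI, the low 8 bits of a RID are the devfn,
 * so each lazily-allocated L2 table covers one bus, as noted above.
 */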

#define STRTAB_L1_DESC_DWORDS		1
#define STRTAB_L1_DESC_SPAN_SHIFT	0
#define STRTAB_L1_DESC_SPAN_MASK	0x1fUL
#define STRTAB_L1_DESC_L2PTR_MASK	GENMASK_ULL(47, 6)

#define STRTAB_STE_DWORDS		8
#define STRTAB_STE_0_V			(1UL << 0)
#define STRTAB_STE_0_CFG_SHIFT		1
#define STRTAB_STE_0_CFG_MASK		0x7UL
#define STRTAB_STE_0_CFG_ABORT		(0UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_BYPASS		(4UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_S1_TRANS	(5UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_S2_TRANS	(6UL << STRTAB_STE_0_CFG_SHIFT)

#define STRTAB_STE_0_S1FMT_SHIFT	4
#define STRTAB_STE_0_S1FMT_LINEAR	(0UL << STRTAB_STE_0_S1FMT_SHIFT)
#define STRTAB_STE_0_S1CTXPTR_MASK	GENMASK_ULL(47, 6)
#define STRTAB_STE_0_S1CDMAX_SHIFT	59
#define STRTAB_STE_0_S1CDMAX_MASK	0x1fUL

#define STRTAB_STE_1_S1C_CACHE_NC	0UL
#define STRTAB_STE_1_S1C_CACHE_WBRA	1UL
#define STRTAB_STE_1_S1C_CACHE_WT	2UL
#define STRTAB_STE_1_S1C_CACHE_WB	3UL
#define STRTAB_STE_1_S1C_SH_NSH		0UL
#define STRTAB_STE_1_S1C_SH_OSH		2UL
#define STRTAB_STE_1_S1C_SH_ISH		3UL
#define STRTAB_STE_1_S1CIR_SHIFT	2
#define STRTAB_STE_1_S1COR_SHIFT	4
#define STRTAB_STE_1_S1CSH_SHIFT	6

#define STRTAB_STE_1_S1STALLD		(1UL << 27)

#define STRTAB_STE_1_EATS_ABT		0UL
#define STRTAB_STE_1_EATS_TRANS		1UL
#define STRTAB_STE_1_EATS_S1CHK		2UL
#define STRTAB_STE_1_EATS_SHIFT		28

#define STRTAB_STE_1_STRW_NSEL1		0UL
#define STRTAB_STE_1_STRW_EL2		2UL
#define STRTAB_STE_1_STRW_SHIFT		30

#define STRTAB_STE_1_SHCFG_INCOMING	1UL
#define STRTAB_STE_1_SHCFG_SHIFT	44

#define STRTAB_STE_2_S2VMID_SHIFT	0
#define STRTAB_STE_2_S2VMID_MASK	0xffffUL
#define STRTAB_STE_2_VTCR_SHIFT		32
#define STRTAB_STE_2_VTCR_MASK		0x7ffffUL
#define STRTAB_STE_2_S2AA64		(1UL << 51)
#define STRTAB_STE_2_S2ENDI		(1UL << 52)
#define STRTAB_STE_2_S2PTW		(1UL << 54)
#define STRTAB_STE_2_S2R		(1UL << 58)

#define STRTAB_STE_3_S2TTB_MASK		GENMASK_ULL(47, 4)

/* Context descriptor (stage-1 only) */
#define CTXDESC_CD_DWORDS		8
#define CTXDESC_CD_0_TCR_T0SZ_SHIFT	0
#define ARM64_TCR_T0SZ_SHIFT		0
#define ARM64_TCR_T0SZ_MASK		0x1fUL
#define CTXDESC_CD_0_TCR_TG0_SHIFT	6
#define ARM64_TCR_TG0_SHIFT		14
#define ARM64_TCR_TG0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_IRGN0_SHIFT	8
#define ARM64_TCR_IRGN0_SHIFT		8
#define ARM64_TCR_IRGN0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_ORGN0_SHIFT	10
#define ARM64_TCR_ORGN0_SHIFT		10
#define ARM64_TCR_ORGN0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_SH0_SHIFT	12
#define ARM64_TCR_SH0_SHIFT		12
#define ARM64_TCR_SH0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_EPD0_SHIFT	14
#define ARM64_TCR_EPD0_SHIFT		7
#define ARM64_TCR_EPD0_MASK		0x1UL
#define CTXDESC_CD_0_TCR_EPD1_SHIFT	30
#define ARM64_TCR_EPD1_SHIFT		23
#define ARM64_TCR_EPD1_MASK		0x1UL

#define CTXDESC_CD_0_ENDI		(1UL << 15)
#define CTXDESC_CD_0_V			(1UL << 31)

#define CTXDESC_CD_0_TCR_IPS_SHIFT	32
#define ARM64_TCR_IPS_SHIFT		32
#define ARM64_TCR_IPS_MASK		0x7UL
#define CTXDESC_CD_0_TCR_TBI0_SHIFT	38
#define ARM64_TCR_TBI0_SHIFT		37
#define ARM64_TCR_TBI0_MASK		0x1UL

#define CTXDESC_CD_0_AA64		(1UL << 41)
#define CTXDESC_CD_0_S			(1UL << 44)
#define CTXDESC_CD_0_R			(1UL << 45)
#define CTXDESC_CD_0_A			(1UL << 46)
#define CTXDESC_CD_0_ASET_SHIFT		47
#define CTXDESC_CD_0_ASET_SHARED	(0UL << CTXDESC_CD_0_ASET_SHIFT)
#define CTXDESC_CD_0_ASET_PRIVATE	(1UL << CTXDESC_CD_0_ASET_SHIFT)
#define CTXDESC_CD_0_ASID_SHIFT		48
#define CTXDESC_CD_0_ASID_MASK		0xffffUL

#define CTXDESC_CD_1_TTB0_MASK		GENMASK_ULL(47, 4)

#define CTXDESC_CD_3_MAIR_SHIFT		0

/* Convert between AArch64 (CPU) TCR format and SMMU CD format */
#define ARM_SMMU_TCR2CD(tcr, fld)					\
	(((tcr) >> ARM64_TCR_##fld##_SHIFT & ARM64_TCR_##fld##_MASK)	\
	 << CTXDESC_CD_0_TCR_##fld##_SHIFT)
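
/*
 * For instance, ARM_SMMU_TCR2CD(tcr, TG0) pulls TCR.TG0 out of bits
 * [15:14] of the CPU register (shift 14, mask 0x3 per the #defines above)
 * and deposits it at bits [7:6] of CD dword 0 (shift 6).
 */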

/* Command queue */
#define CMDQ_ENT_DWORDS			2
#define CMDQ_MAX_SZ_SHIFT		8

#define CMDQ_ERR_SHIFT			24
#define CMDQ_ERR_MASK			0x7f
#define CMDQ_ERR_CERROR_NONE_IDX	0
#define CMDQ_ERR_CERROR_ILL_IDX		1
#define CMDQ_ERR_CERROR_ABT_IDX		2

#define CMDQ_0_OP_SHIFT			0
#define CMDQ_0_OP_MASK			0xffUL
#define CMDQ_0_SSV			(1UL << 11)

#define CMDQ_PREFETCH_0_SID_SHIFT	32
#define CMDQ_PREFETCH_1_SIZE_SHIFT	0
#define CMDQ_PREFETCH_1_ADDR_MASK	GENMASK_ULL(63, 12)

#define CMDQ_CFGI_0_SID_SHIFT		32
#define CMDQ_CFGI_0_SID_MASK		0xffffffffUL
#define CMDQ_CFGI_1_LEAF		(1UL << 0)
#define CMDQ_CFGI_1_RANGE_SHIFT		0
#define CMDQ_CFGI_1_RANGE_MASK		0x1fUL

#define CMDQ_TLBI_0_VMID_SHIFT		32
#define CMDQ_TLBI_0_ASID_SHIFT		48
#define CMDQ_TLBI_1_LEAF		(1UL << 0)
#define CMDQ_TLBI_1_VA_MASK		GENMASK_ULL(63, 12)
#define CMDQ_TLBI_1_IPA_MASK		GENMASK_ULL(47, 12)

#define CMDQ_PRI_0_SSID_SHIFT		12
#define CMDQ_PRI_0_SSID_MASK		0xfffffUL
#define CMDQ_PRI_0_SID_SHIFT		32
#define CMDQ_PRI_0_SID_MASK		0xffffffffUL
#define CMDQ_PRI_1_GRPID_SHIFT		0
#define CMDQ_PRI_1_GRPID_MASK		0x1ffUL
#define CMDQ_PRI_1_RESP_SHIFT		12
#define CMDQ_PRI_1_RESP_DENY		(0UL << CMDQ_PRI_1_RESP_SHIFT)
#define CMDQ_PRI_1_RESP_FAIL		(1UL << CMDQ_PRI_1_RESP_SHIFT)
#define CMDQ_PRI_1_RESP_SUCC		(2UL << CMDQ_PRI_1_RESP_SHIFT)

#define CMDQ_SYNC_0_CS_SHIFT		12
#define CMDQ_SYNC_0_CS_NONE		(0UL << CMDQ_SYNC_0_CS_SHIFT)
#define CMDQ_SYNC_0_CS_IRQ		(1UL << CMDQ_SYNC_0_CS_SHIFT)
#define CMDQ_SYNC_0_CS_SEV		(2UL << CMDQ_SYNC_0_CS_SHIFT)
#define CMDQ_SYNC_0_MSH_SHIFT		22
#define CMDQ_SYNC_0_MSH_ISH		(3UL << CMDQ_SYNC_0_MSH_SHIFT)
#define CMDQ_SYNC_0_MSIATTR_SHIFT	24
#define CMDQ_SYNC_0_MSIATTR_OIWB	(0xfUL << CMDQ_SYNC_0_MSIATTR_SHIFT)
#define CMDQ_SYNC_0_MSIDATA_SHIFT	32
#define CMDQ_SYNC_0_MSIDATA_MASK	0xffffffffUL
#define CMDQ_SYNC_1_MSIADDR_MASK	GENMASK_ULL(47, 2)

/* Event queue */
#define EVTQ_ENT_DWORDS			4
#define EVTQ_MAX_SZ_SHIFT		7

#define EVTQ_0_ID_SHIFT			0
#define EVTQ_0_ID_MASK			0xffUL

/* PRI queue */
#define PRIQ_ENT_DWORDS			2
#define PRIQ_MAX_SZ_SHIFT		8

#define PRIQ_0_SID_SHIFT		0
#define PRIQ_0_SID_MASK			0xffffffffUL
#define PRIQ_0_SSID_SHIFT		32
#define PRIQ_0_SSID_MASK		0xfffffUL
#define PRIQ_0_PERM_PRIV		(1UL << 58)
#define PRIQ_0_PERM_EXEC		(1UL << 59)
#define PRIQ_0_PERM_READ		(1UL << 60)
#define PRIQ_0_PERM_WRITE		(1UL << 61)
#define PRIQ_0_PRG_LAST			(1UL << 62)
#define PRIQ_0_SSID_V			(1UL << 63)

#define PRIQ_1_PRG_IDX_SHIFT		0
#define PRIQ_1_PRG_IDX_MASK		0x1ffUL
#define PRIQ_1_ADDR_MASK		GENMASK_ULL(63, 12)

/* High-level queue structures */
#define ARM_SMMU_POLL_TIMEOUT_US	100
#define ARM_SMMU_CMDQ_SYNC_TIMEOUT_US	1000000 /* 1s! */
#define ARM_SMMU_CMDQ_SYNC_SPIN_COUNT	10

#define MSI_IOVA_BASE			0x8000000
#define MSI_IOVA_LENGTH			0x100000

static bool disable_bypass;
module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum pri_resp {
	PRI_RESP_DENY,
	PRI_RESP_FAIL,
	PRI_RESP_SUCC,
};

enum arm_smmu_msi_index {
	EVTQ_MSI_INDEX,
	GERROR_MSI_INDEX,
	PRIQ_MSI_INDEX,
	ARM_SMMU_MAX_MSIS,
};

static phys_addr_t arm_smmu_msi_cfg[ARM_SMMU_MAX_MSIS][3] = {
	[EVTQ_MSI_INDEX] = {
		ARM_SMMU_EVTQ_IRQ_CFG0,
		ARM_SMMU_EVTQ_IRQ_CFG1,
		ARM_SMMU_EVTQ_IRQ_CFG2,
	},
	[GERROR_MSI_INDEX] = {
		ARM_SMMU_GERROR_IRQ_CFG0,
		ARM_SMMU_GERROR_IRQ_CFG1,
		ARM_SMMU_GERROR_IRQ_CFG2,
	},
	[PRIQ_MSI_INDEX] = {
		ARM_SMMU_PRIQ_IRQ_CFG0,
		ARM_SMMU_PRIQ_IRQ_CFG1,
		ARM_SMMU_PRIQ_IRQ_CFG2,
	},
};

struct arm_smmu_cmdq_ent {
	/* Common fields */
	u8				opcode;
	bool				substream_valid;

	/* Command-specific fields */
	union {
		#define CMDQ_OP_PREFETCH_CFG	0x1
		struct {
			u32		sid;
			u8		size;
			u64		addr;
		} prefetch;

		#define CMDQ_OP_CFGI_STE	0x3
		#define CMDQ_OP_CFGI_ALL	0x4
		struct {
			u32		sid;
			union {
				bool	leaf;
				u8	span;
			};
		} cfgi;

		#define CMDQ_OP_TLBI_NH_ASID	0x11
		#define CMDQ_OP_TLBI_NH_VA	0x12
		#define CMDQ_OP_TLBI_EL2_ALL	0x20
		#define CMDQ_OP_TLBI_S12_VMALL	0x28
		#define CMDQ_OP_TLBI_S2_IPA	0x2a
		#define CMDQ_OP_TLBI_NSNH_ALL	0x30
		struct {
			u16		asid;
			u16		vmid;
			bool		leaf;
			u64		addr;
		} tlbi;

		#define CMDQ_OP_PRI_RESP	0x41
		struct {
			u32		sid;
			u32		ssid;
			u16		grpid;
			enum pri_resp	resp;
		} pri;

		#define CMDQ_OP_CMD_SYNC	0x46
		struct {
			u32		msidata;
			u64		msiaddr;
		} sync;
	};
};

struct arm_smmu_queue {
	int				irq; /* Wired interrupt */

	__le64				*base;
	dma_addr_t			base_dma;
	u64				q_base;

	size_t				ent_dwords;
	u32				max_n_shift;
	u32				prod;
	u32				cons;

	u32 __iomem			*prod_reg;
	u32 __iomem			*cons_reg;
};

struct arm_smmu_cmdq {
	struct arm_smmu_queue		q;
	spinlock_t			lock;
};

struct arm_smmu_evtq {
	struct arm_smmu_queue		q;
	u32				max_stalls;
};

struct arm_smmu_priq {
	struct arm_smmu_queue		q;
};

/* High-level stream table and context descriptor structures */
struct arm_smmu_strtab_l1_desc {
	u8				span;

	__le64				*l2ptr;
	dma_addr_t			l2ptr_dma;
};

struct arm_smmu_s1_cfg {
	__le64				*cdptr;
	dma_addr_t			cdptr_dma;

	struct arm_smmu_ctx_desc {
		u16	asid;
		u64	ttbr;
		u64	tcr;
		u64	mair;
	} cd;
};

struct arm_smmu_s2_cfg {
	u16				vmid;
	u64				vttbr;
	u64				vtcr;
};

struct arm_smmu_strtab_ent {
	/*
	 * An STE is "assigned" if the master emitting the corresponding SID
	 * is attached to a domain. The behaviour of an unassigned STE is
	 * determined by the disable_bypass parameter, whereas an assigned
	 * STE behaves according to s1_cfg/s2_cfg, which themselves are
	 * configured according to the domain type.
	 */
	bool				assigned;
	struct arm_smmu_s1_cfg		*s1_cfg;
	struct arm_smmu_s2_cfg		*s2_cfg;
};

struct arm_smmu_strtab_cfg {
	__le64				*strtab;
	dma_addr_t			strtab_dma;
	struct arm_smmu_strtab_l1_desc	*l1_desc;
	unsigned int			num_l1_ents;

	u64				strtab_base;
	u32				strtab_base_cfg;
};

/* An SMMUv3 instance */
struct arm_smmu_device {
	struct device			*dev;
	void __iomem			*base;

#define ARM_SMMU_FEAT_2_LVL_STRTAB	(1 << 0)
#define ARM_SMMU_FEAT_2_LVL_CDTAB	(1 << 1)
#define ARM_SMMU_FEAT_TT_LE		(1 << 2)
#define ARM_SMMU_FEAT_TT_BE		(1 << 3)
#define ARM_SMMU_FEAT_PRI		(1 << 4)
#define ARM_SMMU_FEAT_ATS		(1 << 5)
#define ARM_SMMU_FEAT_SEV		(1 << 6)
#define ARM_SMMU_FEAT_MSI		(1 << 7)
#define ARM_SMMU_FEAT_COHERENCY		(1 << 8)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 9)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 10)
#define ARM_SMMU_FEAT_STALLS		(1 << 11)
#define ARM_SMMU_FEAT_HYP		(1 << 12)
#define ARM_SMMU_FEAT_STALL_FORCE	(1 << 13)
	u32				features;

#define ARM_SMMU_OPT_SKIP_PREFETCH	(1 << 0)
#define ARM_SMMU_OPT_PAGE0_REGS_ONLY	(1 << 1)
	u32				options;

	struct arm_smmu_cmdq		cmdq;
	struct arm_smmu_evtq		evtq;
	struct arm_smmu_priq		priq;

	int				gerr_irq;
	int				combined_irq;
	atomic_t			sync_nr;

	unsigned long			ias; /* IPA */
	unsigned long			oas; /* PA */
	unsigned long			pgsize_bitmap;

#define ARM_SMMU_MAX_ASIDS		(1 << 16)
	unsigned int			asid_bits;
	DECLARE_BITMAP(asid_map, ARM_SMMU_MAX_ASIDS);

#define ARM_SMMU_MAX_VMIDS		(1 << 16)
	unsigned int			vmid_bits;
	DECLARE_BITMAP(vmid_map, ARM_SMMU_MAX_VMIDS);

	unsigned int			ssid_bits;
	unsigned int			sid_bits;

	struct arm_smmu_strtab_cfg	strtab_cfg;

	u32				sync_count;

	/* IOMMU core code handle */
	struct iommu_device		iommu;
};

/* SMMU private data for each master */
struct arm_smmu_master_data {
	struct arm_smmu_device		*smmu;
	struct arm_smmu_strtab_ent	ste;
};

/* SMMU private data for an IOMMU domain */
enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
	ARM_SMMU_DOMAIN_BYPASS,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct mutex			init_mutex; /* Protects smmu pointer */

	struct io_pgtable_ops		*pgtbl_ops;

	enum arm_smmu_domain_stage	stage;
	union {
		struct arm_smmu_s1_cfg	s1_cfg;
		struct arm_smmu_s2_cfg	s2_cfg;
	};

	struct iommu_domain		domain;
};

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
	{ ARM_SMMU_OPT_PAGE0_REGS_ONLY, "cavium,cn9900-broken-page1-regspace" },
	{ 0, NULL },
};

static inline void __iomem *arm_smmu_page1_fixup(unsigned long offset,
						 struct arm_smmu_device *smmu)
{
	if ((offset > SZ_64K) &&
	    (smmu->options & ARM_SMMU_OPT_PAGE0_REGS_ONLY))
		offset -= SZ_64K;

	return smmu->base + offset;
}

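/*
 * Example of the fixup above (register offsets taken from the #defines at
 * the top of this file): on a device with the Cavium CN99xx erratum, the
 * nominally page-1 register ARM_SMMU_EVTQ_PROD (0x100a8) is accessed at
 * 0xa8 in page 0 instead.
 */
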
static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
					  arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				   arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

/* Low-level queue manipulation functions */
static bool queue_full(struct arm_smmu_queue *q)
{
	return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
	       Q_WRP(q, q->prod) != Q_WRP(q, q->cons);
}

static bool queue_empty(struct arm_smmu_queue *q)
{
	return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
	       Q_WRP(q, q->prod) == Q_WRP(q, q->cons);
}

static void queue_sync_cons(struct arm_smmu_queue *q)
{
	q->cons = readl_relaxed(q->cons_reg);
}

static void queue_inc_cons(struct arm_smmu_queue *q)
{
	u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1;

	q->cons = Q_OVF(q, q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons);
	writel(q->cons, q->cons_reg);
}

static int queue_sync_prod(struct arm_smmu_queue *q)
{
	int ret = 0;
	u32 prod = readl_relaxed(q->prod_reg);

	if (Q_OVF(q, prod) != Q_OVF(q, q->prod))
		ret = -EOVERFLOW;

	q->prod = prod;
	return ret;
}

static void queue_inc_prod(struct arm_smmu_queue *q)
{
	u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + 1;

	q->prod = Q_OVF(q, q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod);
	writel(q->prod, q->prod_reg);
}

/*
 * Wait for the SMMU to consume items. If sync is true, wait until the queue
 * is empty. Otherwise, wait until there is at least one free slot.
 */
static int queue_poll_cons(struct arm_smmu_queue *q, bool sync, bool wfe)
{
	ktime_t timeout;
	unsigned int delay = 1, spin_cnt = 0;

	/* Wait longer if it's a CMD_SYNC */
	timeout = ktime_add_us(ktime_get(), sync ?
					    ARM_SMMU_CMDQ_SYNC_TIMEOUT_US :
					    ARM_SMMU_POLL_TIMEOUT_US);

	while (queue_sync_cons(q), (sync ? !queue_empty(q) : queue_full(q))) {
		if (ktime_compare(ktime_get(), timeout) > 0)
			return -ETIMEDOUT;

		if (wfe) {
			wfe();
		} else if (++spin_cnt < ARM_SMMU_CMDQ_SYNC_SPIN_COUNT) {
			cpu_relax();
			continue;
		} else {
			udelay(delay);
			delay *= 2;
			spin_cnt = 0;
		}
	}

	return 0;
}
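
/*
 * Back-off strategy used above: when the SMMU implements SEV
 * (wfe == true), we simply wait for its wakeup event; otherwise we spin
 * ARM_SMMU_CMDQ_SYNC_SPIN_COUNT times on cpu_relax() before falling back
 * to a doubling udelay(), bounding the polling cost on slow consumers.
 */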

static void queue_write(__le64 *dst, u64 *src, size_t n_dwords)
{
	int i;

	for (i = 0; i < n_dwords; ++i)
		*dst++ = cpu_to_le64(*src++);
}

static int queue_insert_raw(struct arm_smmu_queue *q, u64 *ent)
{
	if (queue_full(q))
		return -ENOSPC;

	queue_write(Q_ENT(q, q->prod), ent, q->ent_dwords);
	queue_inc_prod(q);
	return 0;
}

static void queue_read(__le64 *dst, u64 *src, size_t n_dwords)
{
	int i;

	for (i = 0; i < n_dwords; ++i)
		*dst++ = le64_to_cpu(*src++);
}

static int queue_remove_raw(struct arm_smmu_queue *q, u64 *ent)
{
	if (queue_empty(q))
		return -EAGAIN;

	queue_read(ent, Q_ENT(q, q->cons), q->ent_dwords);
	queue_inc_cons(q);
	return 0;
}

/* High-level queue accessors */
static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
{
	memset(cmd, 0, CMDQ_ENT_DWORDS << 3);
	cmd[0] |= (ent->opcode & CMDQ_0_OP_MASK) << CMDQ_0_OP_SHIFT;

	switch (ent->opcode) {
	case CMDQ_OP_TLBI_EL2_ALL:
	case CMDQ_OP_TLBI_NSNH_ALL:
		break;
	case CMDQ_OP_PREFETCH_CFG:
		cmd[0] |= (u64)ent->prefetch.sid << CMDQ_PREFETCH_0_SID_SHIFT;
		cmd[1] |= ent->prefetch.size << CMDQ_PREFETCH_1_SIZE_SHIFT;
		cmd[1] |= ent->prefetch.addr & CMDQ_PREFETCH_1_ADDR_MASK;
		break;
	case CMDQ_OP_CFGI_STE:
		cmd[0] |= (u64)ent->cfgi.sid << CMDQ_CFGI_0_SID_SHIFT;
		cmd[1] |= ent->cfgi.leaf ? CMDQ_CFGI_1_LEAF : 0;
		break;
	case CMDQ_OP_CFGI_ALL:
		/* Cover the entire SID range */
		cmd[1] |= CMDQ_CFGI_1_RANGE_MASK << CMDQ_CFGI_1_RANGE_SHIFT;
		break;
	case CMDQ_OP_TLBI_NH_VA:
		cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
		cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
		cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK;
		break;
	case CMDQ_OP_TLBI_S2_IPA:
		cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT;
		cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
		cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_IPA_MASK;
		break;
	case CMDQ_OP_TLBI_NH_ASID:
		cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
		/* Fallthrough */
	case CMDQ_OP_TLBI_S12_VMALL:
		cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT;
		break;
	case CMDQ_OP_PRI_RESP:
		cmd[0] |= ent->substream_valid ? CMDQ_0_SSV : 0;
		cmd[0] |= ent->pri.ssid << CMDQ_PRI_0_SSID_SHIFT;
		cmd[0] |= (u64)ent->pri.sid << CMDQ_PRI_0_SID_SHIFT;
		cmd[1] |= ent->pri.grpid << CMDQ_PRI_1_GRPID_SHIFT;
		switch (ent->pri.resp) {
		case PRI_RESP_DENY:
			cmd[1] |= CMDQ_PRI_1_RESP_DENY;
			break;
		case PRI_RESP_FAIL:
			cmd[1] |= CMDQ_PRI_1_RESP_FAIL;
			break;
		case PRI_RESP_SUCC:
			cmd[1] |= CMDQ_PRI_1_RESP_SUCC;
			break;
		default:
			return -EINVAL;
		}
		break;
	case CMDQ_OP_CMD_SYNC:
		if (ent->sync.msiaddr)
			cmd[0] |= CMDQ_SYNC_0_CS_IRQ;
		else
			cmd[0] |= CMDQ_SYNC_0_CS_SEV;
		cmd[0] |= CMDQ_SYNC_0_MSH_ISH | CMDQ_SYNC_0_MSIATTR_OIWB;
		cmd[0] |= (u64)ent->sync.msidata << CMDQ_SYNC_0_MSIDATA_SHIFT;
		cmd[1] |= ent->sync.msiaddr & CMDQ_SYNC_1_MSIADDR_MASK;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}

static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
{
	static const char *cerror_str[] = {
		[CMDQ_ERR_CERROR_NONE_IDX]	= "No error",
		[CMDQ_ERR_CERROR_ILL_IDX]	= "Illegal command",
		[CMDQ_ERR_CERROR_ABT_IDX]	= "Abort on command fetch",
	};

	int i;
	u64 cmd[CMDQ_ENT_DWORDS];
	struct arm_smmu_queue *q = &smmu->cmdq.q;
	u32 cons = readl_relaxed(q->cons_reg);
	u32 idx = cons >> CMDQ_ERR_SHIFT & CMDQ_ERR_MASK;
	struct arm_smmu_cmdq_ent cmd_sync = {
		.opcode = CMDQ_OP_CMD_SYNC,
	};

	dev_err(smmu->dev, "CMDQ error (cons 0x%08x): %s\n", cons,
		idx < ARRAY_SIZE(cerror_str) ? cerror_str[idx] : "Unknown");

	switch (idx) {
	case CMDQ_ERR_CERROR_ABT_IDX:
		dev_err(smmu->dev, "retrying command fetch\n");
		/* Fallthrough */
	case CMDQ_ERR_CERROR_NONE_IDX:
		return;
	case CMDQ_ERR_CERROR_ILL_IDX:
		/* Fallthrough */
	default:
		break;
	}

	/*
	 * We may have concurrent producers, so we need to be careful
	 * not to touch any of the shadow cmdq state.
	 */
	queue_read(cmd, Q_ENT(q, cons), q->ent_dwords);
	dev_err(smmu->dev, "skipping command in error state:\n");
	for (i = 0; i < ARRAY_SIZE(cmd); ++i)
		dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]);

	/* Convert the erroneous command into a CMD_SYNC */
	if (arm_smmu_cmdq_build_cmd(cmd, &cmd_sync)) {
		dev_err(smmu->dev, "failed to convert to CMD_SYNC\n");
		return;
	}

	queue_write(Q_ENT(q, cons), cmd, q->ent_dwords);
}

static void arm_smmu_cmdq_insert_cmd(struct arm_smmu_device *smmu, u64 *cmd)
{
	struct arm_smmu_queue *q = &smmu->cmdq.q;
	bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);

	while (queue_insert_raw(q, cmd) == -ENOSPC) {
		if (queue_poll_cons(q, false, wfe))
			dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
	}
}

static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
				    struct arm_smmu_cmdq_ent *ent)
{
	u64 cmd[CMDQ_ENT_DWORDS];
	unsigned long flags;

	if (arm_smmu_cmdq_build_cmd(cmd, ent)) {
		dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n",
			 ent->opcode);
		return;
	}

	spin_lock_irqsave(&smmu->cmdq.lock, flags);
	arm_smmu_cmdq_insert_cmd(smmu, cmd);
	spin_unlock_irqrestore(&smmu->cmdq.lock, flags);
}

/*
 * The difference between val and sync_idx is bounded by the maximum size of
 * a queue at 2^20 entries, so 32 bits is plenty for wrap-safe arithmetic.
 */
static int __arm_smmu_sync_poll_msi(struct arm_smmu_device *smmu, u32 sync_idx)
{
	ktime_t timeout;
	u32 val;

	timeout = ktime_add_us(ktime_get(), ARM_SMMU_CMDQ_SYNC_TIMEOUT_US);
	val = smp_cond_load_acquire(&smmu->sync_count,
				    (int)(VAL - sync_idx) >= 0 ||
				    !ktime_before(ktime_get(), timeout));

	return (int)(val - sync_idx) < 0 ? -ETIMEDOUT : 0;
}
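
/*
 * Concrete instance of the wrap-safe comparison above (values chosen for
 * illustration): if sync_idx == 0xfffffffe and the hardware has since
 * written val == 0x00000001, then (int)(val - sync_idx) == 3, which is
 * >= 0, so the sync is correctly treated as complete even though the
 * 32-bit counter has wrapped.
 */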

static int __arm_smmu_cmdq_issue_sync_msi(struct arm_smmu_device *smmu)
{
	u64 cmd[CMDQ_ENT_DWORDS];
	unsigned long flags;
	struct arm_smmu_cmdq_ent ent = {
		.opcode = CMDQ_OP_CMD_SYNC,
		.sync	= {
			.msidata = atomic_inc_return_relaxed(&smmu->sync_nr),
			.msiaddr = virt_to_phys(&smmu->sync_count),
		},
	};

	arm_smmu_cmdq_build_cmd(cmd, &ent);

	spin_lock_irqsave(&smmu->cmdq.lock, flags);
	arm_smmu_cmdq_insert_cmd(smmu, cmd);
	spin_unlock_irqrestore(&smmu->cmdq.lock, flags);

	return __arm_smmu_sync_poll_msi(smmu, ent.sync.msidata);
}

static int __arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
{
	u64 cmd[CMDQ_ENT_DWORDS];
	unsigned long flags;
	bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
	struct arm_smmu_cmdq_ent ent = { .opcode = CMDQ_OP_CMD_SYNC };
	int ret;

	arm_smmu_cmdq_build_cmd(cmd, &ent);

	spin_lock_irqsave(&smmu->cmdq.lock, flags);
	arm_smmu_cmdq_insert_cmd(smmu, cmd);
	ret = queue_poll_cons(&smmu->cmdq.q, true, wfe);
	spin_unlock_irqrestore(&smmu->cmdq.lock, flags);

	return ret;
}

static void arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
{
	int ret;
	bool msi = (smmu->features & ARM_SMMU_FEAT_MSI) &&
		   (smmu->features & ARM_SMMU_FEAT_COHERENCY);

	ret = msi ? __arm_smmu_cmdq_issue_sync_msi(smmu)
		  : __arm_smmu_cmdq_issue_sync(smmu);
	if (ret)
		dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout\n");
}

/* Context descriptor manipulation functions */
static u64 arm_smmu_cpu_tcr_to_cd(u64 tcr)
{
	u64 val = 0;

	/* Repack the TCR. Just care about TTBR0 for now */
	val |= ARM_SMMU_TCR2CD(tcr, T0SZ);
	val |= ARM_SMMU_TCR2CD(tcr, TG0);
	val |= ARM_SMMU_TCR2CD(tcr, IRGN0);
	val |= ARM_SMMU_TCR2CD(tcr, ORGN0);
	val |= ARM_SMMU_TCR2CD(tcr, SH0);
	val |= ARM_SMMU_TCR2CD(tcr, EPD0);
	val |= ARM_SMMU_TCR2CD(tcr, EPD1);
	val |= ARM_SMMU_TCR2CD(tcr, IPS);
	val |= ARM_SMMU_TCR2CD(tcr, TBI0);

	return val;
}

static void arm_smmu_write_ctx_desc(struct arm_smmu_device *smmu,
				    struct arm_smmu_s1_cfg *cfg)
{
	u64 val;

	/*
	 * We don't need to issue any invalidation here, as we'll invalidate
	 * the STE when installing the new entry anyway.
	 */
	val = arm_smmu_cpu_tcr_to_cd(cfg->cd.tcr) |
#ifdef __BIG_ENDIAN
	      CTXDESC_CD_0_ENDI |
#endif
	      CTXDESC_CD_0_R | CTXDESC_CD_0_A | CTXDESC_CD_0_ASET_PRIVATE |
	      CTXDESC_CD_0_AA64 | (u64)cfg->cd.asid << CTXDESC_CD_0_ASID_SHIFT |
	      CTXDESC_CD_0_V;

	/* STALL_MODEL==0b10 && CD.S==0 is ILLEGAL */
	if (smmu->features & ARM_SMMU_FEAT_STALL_FORCE)
		val |= CTXDESC_CD_0_S;

	cfg->cdptr[0] = cpu_to_le64(val);

	val = cfg->cd.ttbr & CTXDESC_CD_1_TTB0_MASK;
	cfg->cdptr[1] = cpu_to_le64(val);

	cfg->cdptr[3] = cpu_to_le64(cfg->cd.mair << CTXDESC_CD_3_MAIR_SHIFT);
}

/* Stream table manipulation functions */
static void
arm_smmu_write_strtab_l1_desc(__le64 *dst, struct arm_smmu_strtab_l1_desc *desc)
{
	u64 val = 0;

	val |= (desc->span & STRTAB_L1_DESC_SPAN_MASK)
		<< STRTAB_L1_DESC_SPAN_SHIFT;
	val |= desc->l2ptr_dma & STRTAB_L1_DESC_L2PTR_MASK;

	*dst = cpu_to_le64(val);
}

static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
{
	struct arm_smmu_cmdq_ent cmd = {
		.opcode	= CMDQ_OP_CFGI_STE,
		.cfgi	= {
			.sid	= sid,
			.leaf	= true,
		},
	};

	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	arm_smmu_cmdq_issue_sync(smmu);
}

static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
				      __le64 *dst, struct arm_smmu_strtab_ent *ste)
{
	/*
	 * This is hideously complicated, but we only really care about
	 * three cases at the moment:
	 *
	 * 1. Invalid (all zero) -> bypass/fault (init)
	 * 2. Bypass/fault -> translation/bypass (attach)
	 * 3. Translation/bypass -> bypass/fault (detach)
	 *
	 * Given that we can't update the STE atomically and the SMMU
	 * doesn't read the thing in a defined order, that leaves us
	 * with the following maintenance requirements:
	 *
	 * 1. Update Config, return (init time STEs aren't live)
	 * 2. Write everything apart from dword 0, sync, write dword 0, sync
	 * 3. Update Config, sync
	 */
	u64 val = le64_to_cpu(dst[0]);
	bool ste_live = false;
	struct arm_smmu_cmdq_ent prefetch_cmd = {
		.opcode		= CMDQ_OP_PREFETCH_CFG,
		.prefetch	= {
			.sid	= sid,
		},
	};

	if (val & STRTAB_STE_0_V) {
		u64 cfg;

		cfg = val & STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT;
		switch (cfg) {
		case STRTAB_STE_0_CFG_BYPASS:
			break;
		case STRTAB_STE_0_CFG_S1_TRANS:
		case STRTAB_STE_0_CFG_S2_TRANS:
			ste_live = true;
			break;
		case STRTAB_STE_0_CFG_ABORT:
			if (disable_bypass)
				break;
		default:
			BUG(); /* STE corruption */
		}
	}

	/* Nuke the existing STE_0 value, as we're going to rewrite it */
	val = STRTAB_STE_0_V;

	/* Bypass/fault */
	if (!ste->assigned || !(ste->s1_cfg || ste->s2_cfg)) {
		if (!ste->assigned && disable_bypass)
			val |= STRTAB_STE_0_CFG_ABORT;
		else
			val |= STRTAB_STE_0_CFG_BYPASS;

		dst[0] = cpu_to_le64(val);
		dst[1] = cpu_to_le64(STRTAB_STE_1_SHCFG_INCOMING
			 << STRTAB_STE_1_SHCFG_SHIFT);
		dst[2] = 0; /* Nuke the VMID */
		/*
		 * The SMMU can perform negative caching, so we must sync
		 * the STE regardless of whether the old value was live.
		 */
		if (smmu)
			arm_smmu_sync_ste_for_sid(smmu, sid);
		return;
	}

	if (ste->s1_cfg) {
		BUG_ON(ste_live);
		dst[1] = cpu_to_le64(
			 STRTAB_STE_1_S1C_CACHE_WBRA
			 << STRTAB_STE_1_S1CIR_SHIFT |
			 STRTAB_STE_1_S1C_CACHE_WBRA
			 << STRTAB_STE_1_S1COR_SHIFT |
			 STRTAB_STE_1_S1C_SH_ISH << STRTAB_STE_1_S1CSH_SHIFT |
#ifdef CONFIG_PCI_ATS
			 STRTAB_STE_1_EATS_TRANS << STRTAB_STE_1_EATS_SHIFT |
#endif
			 STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT);

		if (smmu->features & ARM_SMMU_FEAT_STALLS &&
		   !(smmu->features & ARM_SMMU_FEAT_STALL_FORCE))
			dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);

		val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK) |
			STRTAB_STE_0_CFG_S1_TRANS;
	}

	if (ste->s2_cfg) {
		BUG_ON(ste_live);
		dst[2] = cpu_to_le64(
			 ste->s2_cfg->vmid << STRTAB_STE_2_S2VMID_SHIFT |
			 (ste->s2_cfg->vtcr & STRTAB_STE_2_VTCR_MASK)
			  << STRTAB_STE_2_VTCR_SHIFT |
#ifdef __BIG_ENDIAN
			 STRTAB_STE_2_S2ENDI |
#endif
			 STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2AA64 |
			 STRTAB_STE_2_S2R);

		dst[3] = cpu_to_le64(ste->s2_cfg->vttbr &
			 STRTAB_STE_3_S2TTB_MASK);

		val |= STRTAB_STE_0_CFG_S2_TRANS;
	}

	arm_smmu_sync_ste_for_sid(smmu, sid);
	dst[0] = cpu_to_le64(val);
	arm_smmu_sync_ste_for_sid(smmu, sid);

	/* It's likely that we'll want to use the new STE soon */
	if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH))
		arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
}

static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent)
{
	unsigned int i;
	struct arm_smmu_strtab_ent ste = { .assigned = false };

	for (i = 0; i < nent; ++i) {
		arm_smmu_write_strtab_ent(NULL, -1, strtab, &ste);
		strtab += STRTAB_STE_DWORDS;
	}
}

static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
{
	size_t size;
	void *strtab;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
	struct arm_smmu_strtab_l1_desc *desc = &cfg->l1_desc[sid >> STRTAB_SPLIT];

	if (desc->l2ptr)
		return 0;

	size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3);
	strtab = &cfg->strtab[(sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS];

	desc->span = STRTAB_SPLIT + 1;
	desc->l2ptr = dmam_alloc_coherent(smmu->dev, size, &desc->l2ptr_dma,
					  GFP_KERNEL | __GFP_ZERO);
	if (!desc->l2ptr) {
		dev_err(smmu->dev,
			"failed to allocate l2 stream table for SID %u\n",
			sid);
		return -ENOMEM;
	}

	arm_smmu_init_bypass_stes(desc->l2ptr, 1 << STRTAB_SPLIT);
	arm_smmu_write_strtab_l1_desc(strtab, desc);
	return 0;
}

/* IRQ and event handlers */
static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
{
	int i;
	struct arm_smmu_device *smmu = dev;
	struct arm_smmu_queue *q = &smmu->evtq.q;
	u64 evt[EVTQ_ENT_DWORDS];

	do {
		while (!queue_remove_raw(q, evt)) {
			u8 id = evt[0] >> EVTQ_0_ID_SHIFT & EVTQ_0_ID_MASK;

			dev_info(smmu->dev, "event 0x%02x received:\n", id);
			for (i = 0; i < ARRAY_SIZE(evt); ++i)
				dev_info(smmu->dev, "\t0x%016llx\n",
					 (unsigned long long)evt[i]);

		}

		/*
		 * Not much we can do on overflow, so scream and pretend we're
		 * trying harder.
		 */
		if (queue_sync_prod(q) == -EOVERFLOW)
			dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n");
	} while (!queue_empty(q));

	/* Sync our overflow flag, as we believe we're up to speed */
	q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
	return IRQ_HANDLED;
}

static void arm_smmu_handle_ppr(struct arm_smmu_device *smmu, u64 *evt)
{
	u32 sid, ssid;
	u16 grpid;
	bool ssv, last;

	sid = evt[0] >> PRIQ_0_SID_SHIFT & PRIQ_0_SID_MASK;
	ssv = evt[0] & PRIQ_0_SSID_V;
	ssid = ssv ? evt[0] >> PRIQ_0_SSID_SHIFT & PRIQ_0_SSID_MASK : 0;
	last = evt[0] & PRIQ_0_PRG_LAST;
	grpid = evt[1] >> PRIQ_1_PRG_IDX_SHIFT & PRIQ_1_PRG_IDX_MASK;

	dev_info(smmu->dev, "unexpected PRI request received:\n");
	dev_info(smmu->dev,
		 "\tsid 0x%08x.0x%05x: [%u%s] %sprivileged %s%s%s access at iova 0x%016llx\n",
		 sid, ssid, grpid, last ? "L" : "",
		 evt[0] & PRIQ_0_PERM_PRIV ? "" : "un",
		 evt[0] & PRIQ_0_PERM_READ ? "R" : "",
		 evt[0] & PRIQ_0_PERM_WRITE ? "W" : "",
		 evt[0] & PRIQ_0_PERM_EXEC ? "X" : "",
		 evt[1] & PRIQ_1_ADDR_MASK);

	if (last) {
		struct arm_smmu_cmdq_ent cmd = {
			.opcode			= CMDQ_OP_PRI_RESP,
			.substream_valid	= ssv,
			.pri			= {
				.sid	= sid,
				.ssid	= ssid,
				.grpid	= grpid,
				.resp	= PRI_RESP_DENY,
			},
		};

		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	}
}

static irqreturn_t arm_smmu_priq_thread(int irq, void *dev)
{
	struct arm_smmu_device *smmu = dev;
	struct arm_smmu_queue *q = &smmu->priq.q;
	u64 evt[PRIQ_ENT_DWORDS];

	do {
		while (!queue_remove_raw(q, evt))
			arm_smmu_handle_ppr(smmu, evt);

		if (queue_sync_prod(q) == -EOVERFLOW)
			dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n");
	} while (!queue_empty(q));

	/* Sync our overflow flag, as we believe we're up to speed */
	q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
	return IRQ_HANDLED;
}

static int arm_smmu_device_disable(struct arm_smmu_device *smmu);

static irqreturn_t arm_smmu_gerror_handler(int irq, void *dev)
{
	u32 gerror, gerrorn, active;
	struct arm_smmu_device *smmu = dev;

	gerror = readl_relaxed(smmu->base + ARM_SMMU_GERROR);
	gerrorn = readl_relaxed(smmu->base + ARM_SMMU_GERRORN);

	active = gerror ^ gerrorn;
	if (!(active & GERROR_ERR_MASK))
		return IRQ_NONE; /* No errors pending */

	dev_warn(smmu->dev,
		 "unexpected global error reported (0x%08x), this could be serious\n",
		 active);

	if (active & GERROR_SFM_ERR) {
		dev_err(smmu->dev, "device has entered Service Failure Mode!\n");
		arm_smmu_device_disable(smmu);
	}

	if (active & GERROR_MSI_GERROR_ABT_ERR)
		dev_warn(smmu->dev, "GERROR MSI write aborted\n");

	if (active & GERROR_MSI_PRIQ_ABT_ERR)
		dev_warn(smmu->dev, "PRIQ MSI write aborted\n");

	if (active & GERROR_MSI_EVTQ_ABT_ERR)
		dev_warn(smmu->dev, "EVTQ MSI write aborted\n");

	if (active & GERROR_MSI_CMDQ_ABT_ERR)
		dev_warn(smmu->dev, "CMDQ MSI write aborted\n");

	if (active & GERROR_PRIQ_ABT_ERR)
		dev_err(smmu->dev, "PRIQ write aborted -- events may have been lost\n");

	if (active & GERROR_EVTQ_ABT_ERR)
		dev_err(smmu->dev, "EVTQ write aborted -- events may have been lost\n");

	if (active & GERROR_CMDQ_ERR)
		arm_smmu_cmdq_skip_err(smmu);

	writel(gerror, smmu->base + ARM_SMMU_GERRORN);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_combined_irq_thread(int irq, void *dev)
{
	struct arm_smmu_device *smmu = dev;

	arm_smmu_evtq_thread(irq, dev);
	if (smmu->features & ARM_SMMU_FEAT_PRI)
		arm_smmu_priq_thread(irq, dev);

	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_combined_irq_handler(int irq, void *dev)
{
	arm_smmu_gerror_handler(irq, dev);
	return IRQ_WAKE_THREAD;
}

/* IO_PGTABLE API */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	arm_smmu_cmdq_issue_sync(smmu);
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cmdq_ent cmd;

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		cmd.opcode	= CMDQ_OP_TLBI_NH_ASID;
		cmd.tlbi.asid	= smmu_domain->s1_cfg.cd.asid;
		cmd.tlbi.vmid	= 0;
	} else {
		cmd.opcode	= CMDQ_OP_TLBI_S12_VMALL;
		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
	}

	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	__arm_smmu_tlb_sync(smmu);
}

static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cmdq_ent cmd = {
		.tlbi = {
			.leaf	= leaf,
			.addr	= iova,
		},
	};

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		cmd.opcode	= CMDQ_OP_TLBI_NH_VA;
		cmd.tlbi.asid	= smmu_domain->s1_cfg.cd.asid;
	} else {
		cmd.opcode	= CMDQ_OP_TLBI_S2_IPA;
		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
	}

	do {
		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
		cmd.tlbi.addr += granule;
	} while (size -= granule);
}
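
/*
 * Note that the loop above issues one TLBI command per granule with no
 * intervening sync: invalidating a 2MiB range at 4KiB granule queues 512
 * commands, and visibility is only guaranteed once the io-pgtable layer
 * calls back into arm_smmu_tlb_sync() via the gather ops below.
 */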
1488
Bhumika Goyalca297aa2016-10-25 23:36:11 +05301489static const struct iommu_gather_ops arm_smmu_gather_ops = {
Will Deacon48ec83b2015-05-27 17:25:59 +01001490 .tlb_flush_all = arm_smmu_tlb_inv_context,
1491 .tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
1492 .tlb_sync = arm_smmu_tlb_sync,
Will Deacon48ec83b2015-05-27 17:25:59 +01001493};
1494
1495/* IOMMU API */
1496static bool arm_smmu_capable(enum iommu_cap cap)
1497{
1498 switch (cap) {
1499 case IOMMU_CAP_CACHE_COHERENCY:
1500 return true;
Will Deacon48ec83b2015-05-27 17:25:59 +01001501 case IOMMU_CAP_NOEXEC:
1502 return true;
1503 default:
1504 return false;
1505 }
1506}
1507
1508static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
1509{
1510 struct arm_smmu_domain *smmu_domain;
1511
Will Deaconbeb3c6a2017-01-06 16:27:30 +00001512 if (type != IOMMU_DOMAIN_UNMANAGED &&
1513 type != IOMMU_DOMAIN_DMA &&
1514 type != IOMMU_DOMAIN_IDENTITY)
Will Deacon48ec83b2015-05-27 17:25:59 +01001515 return NULL;
1516
1517 /*
1518 * Allocate the domain and initialise some of its data structures.
1519 * We can't really do anything meaningful until we've added a
1520 * master.
1521 */
1522 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
1523 if (!smmu_domain)
1524 return NULL;
1525
Robin Murphy9adb9592016-01-26 18:06:36 +00001526 if (type == IOMMU_DOMAIN_DMA &&
1527 iommu_get_dma_cookie(&smmu_domain->domain)) {
1528 kfree(smmu_domain);
1529 return NULL;
1530 }
1531
Will Deacon48ec83b2015-05-27 17:25:59 +01001532 mutex_init(&smmu_domain->init_mutex);
Will Deacon48ec83b2015-05-27 17:25:59 +01001533 return &smmu_domain->domain;
1534}
1535
static int arm_smmu_bitmap_alloc(unsigned long *map, int span)
{
	int idx, size = 1 << span;

	do {
		idx = find_first_zero_bit(map, size);
		if (idx == size)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void arm_smmu_bitmap_free(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	iommu_put_dma_cookie(domain);
	free_io_pgtable_ops(smmu_domain->pgtbl_ops);

	/* Free the CD and ASID, if we allocated them */
	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;

		if (cfg->cdptr) {
			dmam_free_coherent(smmu_domain->smmu->dev,
					   CTXDESC_CD_DWORDS << 3,
					   cfg->cdptr,
					   cfg->cdptr_dma);

			arm_smmu_bitmap_free(smmu->asid_map, cfg->cd.asid);
		}
	} else {
		struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
		if (cfg->vmid)
			arm_smmu_bitmap_free(smmu->vmid_map, cfg->vmid);
	}

	kfree(smmu_domain);
}

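/*
 * Stage-1 finalisation: allocate an ASID and a single context
 * descriptor (CD), then snapshot ttbr[0]/tcr/mair from the io-pgtable
 * configuration so arm_smmu_write_ctx_desc() can publish them when the
 * domain is attached.
 */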
static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	int ret;
	int asid;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;

	asid = arm_smmu_bitmap_alloc(smmu->asid_map, smmu->asid_bits);
	if (asid < 0)
		return asid;

	cfg->cdptr = dmam_alloc_coherent(smmu->dev, CTXDESC_CD_DWORDS << 3,
					 &cfg->cdptr_dma,
					 GFP_KERNEL | __GFP_ZERO);
	if (!cfg->cdptr) {
		dev_warn(smmu->dev, "failed to allocate context descriptor\n");
		ret = -ENOMEM;
		goto out_free_asid;
	}

	cfg->cd.asid = (u16)asid;
	cfg->cd.ttbr = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
	cfg->cd.tcr = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
	cfg->cd.mair = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
	return 0;

out_free_asid:
	arm_smmu_bitmap_free(smmu->asid_map, asid);
	return ret;
}

static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	int vmid;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;

	vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits);
	if (vmid < 0)
		return vmid;

	cfg->vmid = (u16)vmid;
	cfg->vttbr = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
	cfg->vtcr = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	return 0;
}

static int arm_smmu_domain_finalise(struct iommu_domain *domain)
{
	int ret;
	unsigned long ias, oas;
	enum io_pgtable_fmt fmt;
	struct io_pgtable_cfg pgtbl_cfg;
	struct io_pgtable_ops *pgtbl_ops;
	int (*finalise_stage_fn)(struct arm_smmu_domain *,
				 struct io_pgtable_cfg *);
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
		smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
		return 0;
	}

	/* Restrict the stage to what we can actually support */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		ias = VA_BITS;
		oas = smmu->ias;
		fmt = ARM_64_LPAE_S1;
		finalise_stage_fn = arm_smmu_domain_finalise_s1;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
	case ARM_SMMU_DOMAIN_S2:
		ias = smmu->ias;
		oas = smmu->oas;
		fmt = ARM_64_LPAE_S2;
		finalise_stage_fn = arm_smmu_domain_finalise_s2;
		break;
	default:
		return -EINVAL;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	if (smmu->features & ARM_SMMU_FEAT_COHERENCY)
		pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA;

	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops)
		return -ENOMEM;

	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1UL << ias) - 1;
	domain->geometry.force_aperture = true;

	ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
	if (ret < 0) {
		free_io_pgtable_ops(pgtbl_ops);
		return ret;
	}

	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;
}

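/*
 * Return a pointer to the STE slot for @sid. With a 2-level stream
 * table the SID is split on STRTAB_SPLIT: e.g. with an 8-bit split,
 * sid 0x1234 selects L1 descriptor 0x12 and then slot 0x34 within its
 * L2 array of STEs (each STRTAB_STE_DWORDS u64s wide).
 */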
static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
{
	__le64 *step;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
		struct arm_smmu_strtab_l1_desc *l1_desc;
		int idx;

		/* Two-level walk */
		idx = (sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS;
		l1_desc = &cfg->l1_desc[idx];
		idx = (sid & ((1 << STRTAB_SPLIT) - 1)) * STRTAB_STE_DWORDS;
		step = &l1_desc->l2ptr[idx];
	} else {
		/* Simple linear lookup */
		step = &cfg->strtab[sid * STRTAB_STE_DWORDS];
	}

	return step;
}

static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
{
	int i, j;
	struct arm_smmu_master_data *master = fwspec->iommu_priv;
	struct arm_smmu_device *smmu = master->smmu;

	for (i = 0; i < fwspec->num_ids; ++i) {
		u32 sid = fwspec->ids[i];
		__le64 *step = arm_smmu_get_step_for_sid(smmu, sid);

		/* Bridged PCI devices may end up with duplicated IDs */
		for (j = 0; j < i; j++)
			if (fwspec->ids[j] == sid)
				break;
		if (j < i)
			continue;

		arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste);
	}
}

static void arm_smmu_detach_dev(struct device *dev)
{
	struct arm_smmu_master_data *master = dev->iommu_fwspec->iommu_priv;

	master->ste.assigned = false;
	arm_smmu_install_ste_for_dev(dev->iommu_fwspec);
}

static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret = 0;
	struct arm_smmu_device *smmu;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_master_data *master;
	struct arm_smmu_strtab_ent *ste;

	if (!dev->iommu_fwspec)
		return -ENOENT;

	master = dev->iommu_fwspec->iommu_priv;
	smmu = master->smmu;
	ste = &master->ste;

	/* Already attached to a different domain? */
	if (ste->assigned)
		arm_smmu_detach_dev(dev);

	mutex_lock(&smmu_domain->init_mutex);

	if (!smmu_domain->smmu) {
		smmu_domain->smmu = smmu;
		ret = arm_smmu_domain_finalise(domain);
		if (ret) {
			smmu_domain->smmu = NULL;
			goto out_unlock;
		}
	} else if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s (upstream of %s)\n",
			dev_name(smmu_domain->smmu->dev),
			dev_name(smmu->dev));
		ret = -ENXIO;
		goto out_unlock;
	}

	ste->assigned = true;

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS) {
		ste->s1_cfg = NULL;
		ste->s2_cfg = NULL;
	} else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		ste->s1_cfg = &smmu_domain->s1_cfg;
		ste->s2_cfg = NULL;
		arm_smmu_write_ctx_desc(smmu, ste->s1_cfg);
	} else {
		ste->s1_cfg = NULL;
		ste->s2_cfg = &smmu_domain->s2_cfg;
	}

	arm_smmu_install_ste_for_dev(dev->iommu_fwspec);
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

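/*
 * Map/unmap are thin wrappers: all page-table manipulation is delegated
 * to the io-pgtable library, with TLB maintenance driven back through
 * arm_smmu_gather_ops.
 */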
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	return ops->map(ops, iova, paddr, size, prot);
}

static size_t
arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;

	if (!ops)
		return 0;

	return ops->unmap(ops, iova, size);
}

static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
{
	struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;

	if (smmu)
		__arm_smmu_tlb_sync(smmu);
}

static phys_addr_t
arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;

	if (domain->type == IOMMU_DOMAIN_IDENTITY)
		return iova;

	if (!ops)
		return 0;

	return ops->iova_to_phys(ops, iova);
}

static struct platform_driver arm_smmu_driver;

static int arm_smmu_match_node(struct device *dev, void *data)
{
	return dev->fwnode == data;
}

static
struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
{
	struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
						fwnode, arm_smmu_match_node);
	put_device(dev);
	return dev ? dev_get_drvdata(dev) : NULL;
}

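/*
 * A 2-level stream table accepts num_l1_ents << STRTAB_SPLIT StreamIDs,
 * a linear one num_l1_ents: e.g. 256 L1 descriptors with an 8-bit split
 * cover SIDs below 65536.
 */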
static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
{
	unsigned long limit = smmu->strtab_cfg.num_l1_ents;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
		limit *= 1UL << STRTAB_SPLIT;

	return sid < limit;
}

static struct iommu_ops arm_smmu_ops;

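/*
 * Per-device setup: resolve the SMMU instance from the firmware
 * description, allocate the master data, validate every StreamID and
 * lazily initialise the level-2 stream table entries that back them.
 */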
static int arm_smmu_add_device(struct device *dev)
{
	int i, ret;
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_data *master;
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct iommu_group *group;

	if (!fwspec || fwspec->ops != &arm_smmu_ops)
		return -ENODEV;
	/*
	 * We _can_ actually withstand dodgy bus code re-calling add_device()
	 * without an intervening remove_device()/of_xlate() sequence, but
	 * we're not going to do so quietly...
	 */
	if (WARN_ON_ONCE(fwspec->iommu_priv)) {
		master = fwspec->iommu_priv;
		smmu = master->smmu;
	} else {
		smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
		if (!smmu)
			return -ENODEV;
		master = kzalloc(sizeof(*master), GFP_KERNEL);
		if (!master)
			return -ENOMEM;

		master->smmu = smmu;
		fwspec->iommu_priv = master;
	}

	/* Check the SIDs are in range of the SMMU and our stream table */
	for (i = 0; i < fwspec->num_ids; i++) {
		u32 sid = fwspec->ids[i];

		if (!arm_smmu_sid_in_range(smmu, sid))
			return -ERANGE;

		/* Ensure l2 strtab is initialised */
		if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
			ret = arm_smmu_init_l2_strtab(smmu, sid);
			if (ret)
				return ret;
		}
	}

	group = iommu_group_get_for_dev(dev);
	if (!IS_ERR(group)) {
		iommu_group_put(group);
		iommu_device_link(&smmu->iommu, dev);
	}

	return PTR_ERR_OR_ZERO(group);
}

static void arm_smmu_remove_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_master_data *master;
	struct arm_smmu_device *smmu;

	if (!fwspec || fwspec->ops != &arm_smmu_ops)
		return;

	master = fwspec->iommu_priv;
	smmu = master->smmu;
	if (master->ste.assigned)
		arm_smmu_detach_dev(dev);
	iommu_group_remove_device(dev);
	iommu_device_unlink(&smmu->iommu, dev);
	kfree(master);
	iommu_fwspec_free(dev);
}

static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct iommu_group *group;

	/*
	 * We don't support devices sharing stream IDs other than PCI RID
	 * aliases, since the necessary ID-to-device lookup becomes rather
	 * impractical given a potential sparse 32-bit stream ID space.
	 */
	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else
		group = generic_device_group(dev);

	return group;
}

static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	default:
		return -ENODEV;
	}
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	return iommu_fwspec_add_ids(dev, args->args, 1);
}

static void arm_smmu_get_resv_regions(struct device *dev,
				      struct list_head *head)
{
	struct iommu_resv_region *region;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

	region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
					 prot, IOMMU_RESV_SW_MSI);
	if (!region)
		return;

	list_add_tail(&region->list, head);

	iommu_dma_get_resv_regions(dev, head);
}

static void arm_smmu_put_resv_regions(struct device *dev,
				      struct list_head *head)
{
	struct iommu_resv_region *entry, *next;

	list_for_each_entry_safe(entry, next, head, list)
		kfree(entry);
}

static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.map_sg			= default_iommu_map_sg,
	.flush_iotlb_all	= arm_smmu_iotlb_sync,
	.iotlb_sync		= arm_smmu_iotlb_sync,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= arm_smmu_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.of_xlate		= arm_smmu_of_xlate,
	.get_resv_regions	= arm_smmu_get_resv_regions,
	.put_resv_regions	= arm_smmu_put_resv_regions,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};

/* Probing and initialisation functions */
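/*
 * Allocate one circular queue and record its control registers. The
 * queue holds (1 << max_n_shift) entries of @dwords 64-bit words each:
 * for example, a 256-entry command queue with 2 dwords per entry needs
 * 256 * 16 = 4KiB of DMA-able memory.
 */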
static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
				   struct arm_smmu_queue *q,
				   unsigned long prod_off,
				   unsigned long cons_off,
				   size_t dwords)
{
	size_t qsz = ((1 << q->max_n_shift) * dwords) << 3;

	q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma, GFP_KERNEL);
	if (!q->base) {
		dev_err(smmu->dev, "failed to allocate queue (0x%zx bytes)\n",
			qsz);
		return -ENOMEM;
	}

	q->prod_reg = arm_smmu_page1_fixup(prod_off, smmu);
	q->cons_reg = arm_smmu_page1_fixup(cons_off, smmu);
	q->ent_dwords = dwords;

	q->q_base = Q_BASE_RWA;
	q->q_base |= q->base_dma & Q_BASE_ADDR_MASK;
	q->q_base |= (q->max_n_shift & Q_BASE_LOG2SIZE_MASK)
		     << Q_BASE_LOG2SIZE_SHIFT;

	q->prod = q->cons = 0;
	return 0;
}

static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
{
	int ret;

	/* cmdq */
	spin_lock_init(&smmu->cmdq.lock);
	ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, ARM_SMMU_CMDQ_PROD,
				      ARM_SMMU_CMDQ_CONS, CMDQ_ENT_DWORDS);
	if (ret)
		return ret;

	/* evtq */
	ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, ARM_SMMU_EVTQ_PROD,
				      ARM_SMMU_EVTQ_CONS, EVTQ_ENT_DWORDS);
	if (ret)
		return ret;

	/* priq */
	if (!(smmu->features & ARM_SMMU_FEAT_PRI))
		return 0;

	return arm_smmu_init_one_queue(smmu, &smmu->priq.q, ARM_SMMU_PRIQ_PROD,
				       ARM_SMMU_PRIQ_CONS, PRIQ_ENT_DWORDS);
}

static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu)
{
	unsigned int i;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
	size_t size = sizeof(*cfg->l1_desc) * cfg->num_l1_ents;
	void *strtab = smmu->strtab_cfg.strtab;

	cfg->l1_desc = devm_kzalloc(smmu->dev, size, GFP_KERNEL);
	if (!cfg->l1_desc) {
		dev_err(smmu->dev, "failed to allocate l1 stream table desc\n");
		return -ENOMEM;
	}

	for (i = 0; i < cfg->num_l1_ents; ++i) {
		arm_smmu_write_strtab_l1_desc(strtab, &cfg->l1_desc[i]);
		strtab += STRTAB_L1_DESC_DWORDS << 3;
	}

	return 0;
}

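/*
 * Size the level-1 table: STRTAB_L1_SZ_SHIFT caps its footprint, and
 * each L1 descriptor spans 1 << STRTAB_SPLIT STEs. For instance, 16-bit
 * SIDs with an 8-bit split need 1 << (16 - 8) = 256 L1 descriptors; the
 * L2 arrays themselves are allocated on demand by
 * arm_smmu_init_l2_strtab().
 */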
static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
{
	void *strtab;
	u64 reg;
	u32 size, l1size;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	/* Calculate the L1 size, capped to the SIDSIZE. */
	size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
	size = min(size, smmu->sid_bits - STRTAB_SPLIT);
	cfg->num_l1_ents = 1 << size;

	size += STRTAB_SPLIT;
	if (size < smmu->sid_bits)
		dev_warn(smmu->dev,
			 "2-level strtab only covers %u/%u bits of SID\n",
			 size, smmu->sid_bits);

	l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);
	strtab = dmam_alloc_coherent(smmu->dev, l1size, &cfg->strtab_dma,
				     GFP_KERNEL | __GFP_ZERO);
	if (!strtab) {
		dev_err(smmu->dev,
			"failed to allocate l1 stream table (%u bytes)\n",
			l1size);
		return -ENOMEM;
	}
	cfg->strtab = strtab;

	/* Configure strtab_base_cfg for 2 levels */
	reg = STRTAB_BASE_CFG_FMT_2LVL;
	reg |= (size & STRTAB_BASE_CFG_LOG2SIZE_MASK)
		<< STRTAB_BASE_CFG_LOG2SIZE_SHIFT;
	reg |= (STRTAB_SPLIT & STRTAB_BASE_CFG_SPLIT_MASK)
		<< STRTAB_BASE_CFG_SPLIT_SHIFT;
	cfg->strtab_base_cfg = reg;

	return arm_smmu_init_l1_strtab(smmu);
}

static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
{
	void *strtab;
	u64 reg;
	u32 size;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	size = (1 << smmu->sid_bits) * (STRTAB_STE_DWORDS << 3);
	strtab = dmam_alloc_coherent(smmu->dev, size, &cfg->strtab_dma,
				     GFP_KERNEL | __GFP_ZERO);
	if (!strtab) {
		dev_err(smmu->dev,
			"failed to allocate linear stream table (%u bytes)\n",
			size);
		return -ENOMEM;
	}
	cfg->strtab = strtab;
	cfg->num_l1_ents = 1 << smmu->sid_bits;

	/* Configure strtab_base_cfg for a linear table covering all SIDs */
	reg = STRTAB_BASE_CFG_FMT_LINEAR;
	reg |= (smmu->sid_bits & STRTAB_BASE_CFG_LOG2SIZE_MASK)
		<< STRTAB_BASE_CFG_LOG2SIZE_SHIFT;
	cfg->strtab_base_cfg = reg;

	arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents);
	return 0;
}

static int arm_smmu_init_strtab(struct arm_smmu_device *smmu)
{
	u64 reg;
	int ret;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
		ret = arm_smmu_init_strtab_2lvl(smmu);
	else
		ret = arm_smmu_init_strtab_linear(smmu);

	if (ret)
		return ret;

	/* Set the strtab base address */
	reg = smmu->strtab_cfg.strtab_dma & STRTAB_BASE_ADDR_MASK;
	reg |= STRTAB_BASE_RA;
	smmu->strtab_cfg.strtab_base = reg;

	/* Allocate the first VMID for stage-2 bypass STEs */
	set_bit(0, smmu->vmid_map);
	return 0;
}

static int arm_smmu_init_structures(struct arm_smmu_device *smmu)
{
	int ret;

	atomic_set(&smmu->sync_nr, 0);
	ret = arm_smmu_init_queues(smmu);
	if (ret)
		return ret;

	return arm_smmu_init_strtab(smmu);
}

static int arm_smmu_write_reg_sync(struct arm_smmu_device *smmu, u32 val,
				   unsigned int reg_off, unsigned int ack_off)
{
	u32 reg;

	writel_relaxed(val, smmu->base + reg_off);
	return readl_relaxed_poll_timeout(smmu->base + ack_off, reg, reg == val,
					  1, ARM_SMMU_POLL_TIMEOUT_US);
}

/*
 * GBPA is "special": it has no CR0ACK-style acknowledge register, so
 * updates are synchronised by polling for its own GBPA.Update bit to
 * clear, both before and after writing the new value.
 */
static int arm_smmu_update_gbpa(struct arm_smmu_device *smmu, u32 set, u32 clr)
{
	int ret;
	u32 reg, __iomem *gbpa = smmu->base + ARM_SMMU_GBPA;

	ret = readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
					 1, ARM_SMMU_POLL_TIMEOUT_US);
	if (ret)
		return ret;

	reg &= ~clr;
	reg |= set;
	writel_relaxed(reg | GBPA_UPDATE, gbpa);
	return readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
					  1, ARM_SMMU_POLL_TIMEOUT_US);
}

static void arm_smmu_free_msis(void *data)
{
	struct device *dev = data;
	platform_msi_domain_free_irqs(dev);
}

static void arm_smmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	phys_addr_t doorbell;
	struct device *dev = msi_desc_to_dev(desc);
	struct arm_smmu_device *smmu = dev_get_drvdata(dev);
	phys_addr_t *cfg = arm_smmu_msi_cfg[desc->platform.msi_index];

	doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo;
	doorbell &= MSI_CFG0_ADDR_MASK;

	writeq_relaxed(doorbell, smmu->base + cfg[0]);
	writel_relaxed(msg->data, smmu->base + cfg[1]);
	writel_relaxed(MSI_CFG2_MEMATTR_DEVICE_nGnRE, smmu->base + cfg[2]);
}

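/*
 * Wire the event, gerror and (if supported) PRI queue interrupts to
 * platform MSIs: each vector's doorbell address/data pair is written
 * back into the corresponding SMMU *_IRQ_CFG registers by
 * arm_smmu_write_msi_msg().
 */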
static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
{
	struct msi_desc *desc;
	int ret, nvec = ARM_SMMU_MAX_MSIS;
	struct device *dev = smmu->dev;

	/* Clear the MSI address regs */
	writeq_relaxed(0, smmu->base + ARM_SMMU_GERROR_IRQ_CFG0);
	writeq_relaxed(0, smmu->base + ARM_SMMU_EVTQ_IRQ_CFG0);

	if (smmu->features & ARM_SMMU_FEAT_PRI)
		writeq_relaxed(0, smmu->base + ARM_SMMU_PRIQ_IRQ_CFG0);
	else
		nvec--;

	if (!(smmu->features & ARM_SMMU_FEAT_MSI))
		return;

	if (!dev->msi_domain) {
		dev_info(smmu->dev, "msi_domain absent - falling back to wired irqs\n");
		return;
	}

	/* Allocate MSIs for evtq, gerror and priq. Ignore cmdq */
	ret = platform_msi_domain_alloc_irqs(dev, nvec, arm_smmu_write_msi_msg);
	if (ret) {
		dev_warn(dev, "failed to allocate MSIs - falling back to wired irqs\n");
		return;
	}

	for_each_msi_entry(desc, dev) {
		switch (desc->platform.msi_index) {
		case EVTQ_MSI_INDEX:
			smmu->evtq.q.irq = desc->irq;
			break;
		case GERROR_MSI_INDEX:
			smmu->gerr_irq = desc->irq;
			break;
		case PRIQ_MSI_INDEX:
			smmu->priq.q.irq = desc->irq;
			break;
		default:	/* Unknown */
			continue;
		}
	}

	/* Add callback to free MSIs on teardown */
	devm_add_action(dev, arm_smmu_free_msis, dev);
}

static void arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu)
{
	int irq, ret;

	arm_smmu_setup_msis(smmu);

	/* Request interrupt lines */
	irq = smmu->evtq.q.irq;
	if (irq) {
		ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
						arm_smmu_evtq_thread,
						IRQF_ONESHOT,
						"arm-smmu-v3-evtq", smmu);
		if (ret < 0)
			dev_warn(smmu->dev, "failed to enable evtq irq\n");
	} else {
		dev_warn(smmu->dev, "no evtq irq - events will not be reported!\n");
	}

	irq = smmu->gerr_irq;
	if (irq) {
		ret = devm_request_irq(smmu->dev, irq, arm_smmu_gerror_handler,
				       0, "arm-smmu-v3-gerror", smmu);
		if (ret < 0)
			dev_warn(smmu->dev, "failed to enable gerror irq\n");
	} else {
		dev_warn(smmu->dev, "no gerr irq - errors will not be reported!\n");
	}

	if (smmu->features & ARM_SMMU_FEAT_PRI) {
		irq = smmu->priq.q.irq;
		if (irq) {
			ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
							arm_smmu_priq_thread,
							IRQF_ONESHOT,
							"arm-smmu-v3-priq",
							smmu);
			if (ret < 0)
				dev_warn(smmu->dev,
					 "failed to enable priq irq\n");
		} else {
			dev_warn(smmu->dev, "no priq irq - PRI will be broken\n");
		}
	}
}

static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
{
	int ret, irq;
	u32 irqen_flags = IRQ_CTRL_EVTQ_IRQEN | IRQ_CTRL_GERROR_IRQEN;

	/* Disable IRQs first */
	ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_IRQ_CTRL,
				      ARM_SMMU_IRQ_CTRLACK);
	if (ret) {
		dev_err(smmu->dev, "failed to disable irqs\n");
		return ret;
	}

	irq = smmu->combined_irq;
	if (irq) {
		/*
		 * The Cavium ThunderX2 implementation doesn't support unique
		 * irq lines, so use a single irq line for all the SMMUv3
		 * interrupts.
		 */
		ret = devm_request_threaded_irq(smmu->dev, irq,
						arm_smmu_combined_irq_handler,
						arm_smmu_combined_irq_thread,
						IRQF_ONESHOT,
						"arm-smmu-v3-combined-irq", smmu);
		if (ret < 0)
			dev_warn(smmu->dev, "failed to enable combined irq\n");
	} else
		arm_smmu_setup_unique_irqs(smmu);

	if (smmu->features & ARM_SMMU_FEAT_PRI)
		irqen_flags |= IRQ_CTRL_PRIQ_IRQEN;

	/* Enable interrupt generation on the SMMU */
	ret = arm_smmu_write_reg_sync(smmu, irqen_flags,
				      ARM_SMMU_IRQ_CTRL, ARM_SMMU_IRQ_CTRLACK);
	if (ret)
		dev_warn(smmu->dev, "failed to enable irqs\n");

	return 0;
}

static int arm_smmu_device_disable(struct arm_smmu_device *smmu)
{
	int ret;

	ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_CR0, ARM_SMMU_CR0ACK);
	if (ret)
		dev_err(smmu->dev, "failed to clear cr0\n");

	return ret;
}

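/*
 * Bring the SMMU up from scratch: disable it, program the table and
 * queue attributes and base registers, enable the queues one by one,
 * invalidate any stale configuration and TLB state, set up IRQs and
 * finally flip CR0.SMMUEN (or configure GBPA for bypass).
 */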
static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
{
	int ret;
	u32 reg, enables;
	struct arm_smmu_cmdq_ent cmd;

	/* Clear CR0 and sync (disables SMMU and queue processing) */
	reg = readl_relaxed(smmu->base + ARM_SMMU_CR0);
	if (reg & CR0_SMMUEN)
		dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");

	ret = arm_smmu_device_disable(smmu);
	if (ret)
		return ret;

	/* CR1 (table and queue memory attributes) */
	reg = (CR1_SH_ISH << CR1_TABLE_SH_SHIFT) |
	      (CR1_CACHE_WB << CR1_TABLE_OC_SHIFT) |
	      (CR1_CACHE_WB << CR1_TABLE_IC_SHIFT) |
	      (CR1_SH_ISH << CR1_QUEUE_SH_SHIFT) |
	      (CR1_CACHE_WB << CR1_QUEUE_OC_SHIFT) |
	      (CR1_CACHE_WB << CR1_QUEUE_IC_SHIFT);
	writel_relaxed(reg, smmu->base + ARM_SMMU_CR1);

	/* CR2 (random crap) */
	reg = CR2_PTM | CR2_RECINVSID | CR2_E2H;
	writel_relaxed(reg, smmu->base + ARM_SMMU_CR2);

	/* Stream table */
	writeq_relaxed(smmu->strtab_cfg.strtab_base,
		       smmu->base + ARM_SMMU_STRTAB_BASE);
	writel_relaxed(smmu->strtab_cfg.strtab_base_cfg,
		       smmu->base + ARM_SMMU_STRTAB_BASE_CFG);

	/* Command queue */
	writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE);
	writel_relaxed(smmu->cmdq.q.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
	writel_relaxed(smmu->cmdq.q.cons, smmu->base + ARM_SMMU_CMDQ_CONS);

	enables = CR0_CMDQEN;
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable command queue\n");
		return ret;
	}

	/* Invalidate any cached configuration */
	cmd.opcode = CMDQ_OP_CFGI_ALL;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	arm_smmu_cmdq_issue_sync(smmu);

	/* Invalidate any stale TLB entries */
	if (smmu->features & ARM_SMMU_FEAT_HYP) {
		cmd.opcode = CMDQ_OP_TLBI_EL2_ALL;
		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	}

	cmd.opcode = CMDQ_OP_TLBI_NSNH_ALL;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	arm_smmu_cmdq_issue_sync(smmu);

	/* Event queue */
	writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE);
	writel_relaxed(smmu->evtq.q.prod,
		       arm_smmu_page1_fixup(ARM_SMMU_EVTQ_PROD, smmu));
	writel_relaxed(smmu->evtq.q.cons,
		       arm_smmu_page1_fixup(ARM_SMMU_EVTQ_CONS, smmu));

	enables |= CR0_EVTQEN;
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable event queue\n");
		return ret;
	}

	/* PRI queue */
	if (smmu->features & ARM_SMMU_FEAT_PRI) {
		writeq_relaxed(smmu->priq.q.q_base,
			       smmu->base + ARM_SMMU_PRIQ_BASE);
		writel_relaxed(smmu->priq.q.prod,
			       arm_smmu_page1_fixup(ARM_SMMU_PRIQ_PROD, smmu));
		writel_relaxed(smmu->priq.q.cons,
			       arm_smmu_page1_fixup(ARM_SMMU_PRIQ_CONS, smmu));

		enables |= CR0_PRIQEN;
		ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
					      ARM_SMMU_CR0ACK);
		if (ret) {
			dev_err(smmu->dev, "failed to enable PRI queue\n");
			return ret;
		}
	}

	ret = arm_smmu_setup_irqs(smmu);
	if (ret) {
		dev_err(smmu->dev, "failed to setup irqs\n");
		return ret;
	}

	/* Enable the SMMU interface, or ensure bypass */
	if (!bypass || disable_bypass) {
		enables |= CR0_SMMUEN;
	} else {
		ret = arm_smmu_update_gbpa(smmu, 0, GBPA_ABORT);
		if (ret) {
			dev_err(smmu->dev, "GBPA not responding to update\n");
			return ret;
		}
	}
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable SMMU interface\n");
		return ret;
	}

	return 0;
}

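/*
 * Probe the ID registers: translate the IDR0/IDR1/IDR5 fields into
 * feature flags, queue and table sizes, and address sizes, warning
 * where the firmware description disagrees with the hardware.
 */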
static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
{
	u32 reg;
	bool coherent = smmu->features & ARM_SMMU_FEAT_COHERENCY;

	/* IDR0 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0);

	/* 2-level structures */
	if ((reg & IDR0_ST_LVL_MASK << IDR0_ST_LVL_SHIFT) == IDR0_ST_LVL_2LVL)
		smmu->features |= ARM_SMMU_FEAT_2_LVL_STRTAB;

	if (reg & IDR0_CD2L)
		smmu->features |= ARM_SMMU_FEAT_2_LVL_CDTAB;

	/*
	 * Translation table endianness.
	 * We currently require the same endianness as the CPU, but this
	 * could be changed later by adding a new IO_PGTABLE_QUIRK.
	 */
	switch (reg & IDR0_TTENDIAN_MASK << IDR0_TTENDIAN_SHIFT) {
	case IDR0_TTENDIAN_MIXED:
		smmu->features |= ARM_SMMU_FEAT_TT_LE | ARM_SMMU_FEAT_TT_BE;
		break;
#ifdef __BIG_ENDIAN
	case IDR0_TTENDIAN_BE:
		smmu->features |= ARM_SMMU_FEAT_TT_BE;
		break;
#else
	case IDR0_TTENDIAN_LE:
		smmu->features |= ARM_SMMU_FEAT_TT_LE;
		break;
#endif
	default:
		dev_err(smmu->dev, "unknown/unsupported TT endianness!\n");
		return -ENXIO;
	}

	/* Boolean feature flags */
	if (IS_ENABLED(CONFIG_PCI_PRI) && reg & IDR0_PRI)
		smmu->features |= ARM_SMMU_FEAT_PRI;

	if (IS_ENABLED(CONFIG_PCI_ATS) && reg & IDR0_ATS)
		smmu->features |= ARM_SMMU_FEAT_ATS;

	if (reg & IDR0_SEV)
		smmu->features |= ARM_SMMU_FEAT_SEV;

	if (reg & IDR0_MSI)
		smmu->features |= ARM_SMMU_FEAT_MSI;

	if (reg & IDR0_HYP)
		smmu->features |= ARM_SMMU_FEAT_HYP;

	/*
	 * The coherency feature as set by FW is used in preference to the ID
	 * register, but warn on mismatch.
	 */
	if (!!(reg & IDR0_COHACC) != coherent)
		dev_warn(smmu->dev, "IDR0.COHACC overridden by FW configuration (%s)\n",
			 coherent ? "true" : "false");

	switch (reg & IDR0_STALL_MODEL_MASK << IDR0_STALL_MODEL_SHIFT) {
	case IDR0_STALL_MODEL_FORCE:
		smmu->features |= ARM_SMMU_FEAT_STALL_FORCE;
		/* Fallthrough */
	case IDR0_STALL_MODEL_STALL:
		smmu->features |= ARM_SMMU_FEAT_STALLS;
	}

	if (reg & IDR0_S1P)
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;

	if (reg & IDR0_S2P)
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;

	if (!(reg & (IDR0_S1P | IDR0_S2P))) {
		dev_err(smmu->dev, "no translation support!\n");
		return -ENXIO;
	}

	/* We only support the AArch64 table format at present */
	switch (reg & IDR0_TTF_MASK << IDR0_TTF_SHIFT) {
	case IDR0_TTF_AARCH32_64:
		smmu->ias = 40;
		/* Fallthrough */
	case IDR0_TTF_AARCH64:
		break;
	default:
		dev_err(smmu->dev, "AArch64 table format not supported!\n");
		return -ENXIO;
	}

	/* ASID/VMID sizes */
	smmu->asid_bits = reg & IDR0_ASID16 ? 16 : 8;
	smmu->vmid_bits = reg & IDR0_VMID16 ? 16 : 8;

	/* IDR1 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR1);
	if (reg & (IDR1_TABLES_PRESET | IDR1_QUEUES_PRESET | IDR1_REL)) {
		dev_err(smmu->dev, "embedded implementation not supported\n");
		return -ENXIO;
	}

	/* Queue sizes, capped at 4k */
	smmu->cmdq.q.max_n_shift = min((u32)CMDQ_MAX_SZ_SHIFT,
				       reg >> IDR1_CMDQ_SHIFT & IDR1_CMDQ_MASK);
	if (!smmu->cmdq.q.max_n_shift) {
		/* Odd alignment restrictions on the base, so ignore for now */
		dev_err(smmu->dev, "unit-length command queue not supported\n");
		return -ENXIO;
	}

	smmu->evtq.q.max_n_shift = min((u32)EVTQ_MAX_SZ_SHIFT,
				       reg >> IDR1_EVTQ_SHIFT & IDR1_EVTQ_MASK);
	smmu->priq.q.max_n_shift = min((u32)PRIQ_MAX_SZ_SHIFT,
				       reg >> IDR1_PRIQ_SHIFT & IDR1_PRIQ_MASK);

	/* SID/SSID sizes */
	smmu->ssid_bits = reg >> IDR1_SSID_SHIFT & IDR1_SSID_MASK;
	smmu->sid_bits = reg >> IDR1_SID_SHIFT & IDR1_SID_MASK;

	/*
	 * If the SMMU supports fewer bits than would fill a single L2 stream
	 * table, use a linear table instead.
	 */
	if (smmu->sid_bits <= STRTAB_SPLIT)
		smmu->features &= ~ARM_SMMU_FEAT_2_LVL_STRTAB;

	/* IDR5 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);

	/* Maximum number of outstanding stalls */
	smmu->evtq.max_stalls = reg >> IDR5_STALL_MAX_SHIFT
				& IDR5_STALL_MAX_MASK;

	/* Page sizes */
	if (reg & IDR5_GRAN64K)
		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
	if (reg & IDR5_GRAN16K)
		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
	if (reg & IDR5_GRAN4K)
		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;

	if (arm_smmu_ops.pgsize_bitmap == -1UL)
		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
	else
		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;

	/* Output address size */
	switch (reg & IDR5_OAS_MASK << IDR5_OAS_SHIFT) {
	case IDR5_OAS_32_BIT:
		smmu->oas = 32;
		break;
	case IDR5_OAS_36_BIT:
		smmu->oas = 36;
		break;
	case IDR5_OAS_40_BIT:
		smmu->oas = 40;
		break;
	case IDR5_OAS_42_BIT:
		smmu->oas = 42;
		break;
	case IDR5_OAS_44_BIT:
		smmu->oas = 44;
		break;
	default:
		dev_info(smmu->dev,
			 "unknown output address size. Truncating to 48-bit\n");
		/* Fallthrough */
	case IDR5_OAS_48_BIT:
		smmu->oas = 48;
	}

	/* Set the DMA mask for our table walker */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(smmu->oas)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	smmu->ias = max(smmu->ias, smmu->oas);

	dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
		 smmu->ias, smmu->oas, smmu->features);
	return 0;
}

#ifdef CONFIG_ACPI
static void acpi_smmu_get_options(u32 model, struct arm_smmu_device *smmu)
{
	switch (model) {
	case ACPI_IORT_SMMU_V3_CAVIUM_CN99XX:
		smmu->options |= ARM_SMMU_OPT_PAGE0_REGS_ONLY;
		break;
	case ACPI_IORT_SMMU_V3_HISILICON_HI161X:
		smmu->options |= ARM_SMMU_OPT_SKIP_PREFETCH;
		break;
	}

	dev_notice(smmu->dev, "option mask 0x%x\n", smmu->options);
}

static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
				      struct arm_smmu_device *smmu)
{
	struct acpi_iort_smmu_v3 *iort_smmu;
	struct device *dev = smmu->dev;
	struct acpi_iort_node *node;

	node = *(struct acpi_iort_node **)dev_get_platdata(dev);

	/* Retrieve SMMUv3 specific data */
	iort_smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	acpi_smmu_get_options(iort_smmu->model, smmu);

	if (iort_smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE)
		smmu->features |= ARM_SMMU_FEAT_COHERENCY;

	return 0;
}
#else
static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
					     struct arm_smmu_device *smmu)
{
	return -ENODEV;
}
#endif

static int arm_smmu_device_dt_probe(struct platform_device *pdev,
				    struct arm_smmu_device *smmu)
{
	struct device *dev = &pdev->dev;
	u32 cells;
	int ret = -EINVAL;

	if (of_property_read_u32(dev->of_node, "#iommu-cells", &cells))
		dev_err(dev, "missing #iommu-cells property\n");
	else if (cells != 1)
		dev_err(dev, "invalid #iommu-cells value (%d)\n", cells);
	else
		ret = 0;

	parse_driver_options(smmu);

	if (of_dma_is_coherent(dev->of_node))
		smmu->features |= ARM_SMMU_FEAT_COHERENCY;

	return ret;
}

static unsigned long arm_smmu_resource_size(struct arm_smmu_device *smmu)
{
	if (smmu->options & ARM_SMMU_OPT_PAGE0_REGS_ONLY)
		return SZ_64K;
	else
		return SZ_128K;
}

static int arm_smmu_device_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct resource *res;
	resource_size_t ioaddr;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	bool bypass;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	if (dev->of_node) {
		ret = arm_smmu_device_dt_probe(pdev, smmu);
	} else {
		ret = arm_smmu_device_acpi_probe(pdev, smmu);
		if (ret == -ENODEV)
			return ret;
	}

	/* Set bypass mode according to firmware probing result */
	bypass = !!ret;

	/* Base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (resource_size(res) + 1 < arm_smmu_resource_size(smmu)) {
		dev_err(dev, "MMIO region too small (%pr)\n", res);
		return -EINVAL;
	}
	ioaddr = res->start;

	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);

	/* Interrupt lines */

	irq = platform_get_irq_byname(pdev, "combined");
	if (irq > 0)
		smmu->combined_irq = irq;
	else {
		irq = platform_get_irq_byname(pdev, "eventq");
		if (irq > 0)
			smmu->evtq.q.irq = irq;

		irq = platform_get_irq_byname(pdev, "priq");
		if (irq > 0)
			smmu->priq.q.irq = irq;

		irq = platform_get_irq_byname(pdev, "gerror");
		if (irq > 0)
			smmu->gerr_irq = irq;
	}
	/* Probe the h/w */
	ret = arm_smmu_device_hw_probe(smmu);
	if (ret)
		return ret;

	/* Initialise in-memory data structures */
	ret = arm_smmu_init_structures(smmu);
	if (ret)
		return ret;

	/* Record our private device structure */
	platform_set_drvdata(pdev, smmu);

	/* Reset the device */
	ret = arm_smmu_device_reset(smmu, bypass);
	if (ret)
		return ret;

	/* And we're up. Go go go! */
	ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL,
				     "smmu3.%pa", &ioaddr);
	if (ret)
		return ret;

	iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
	iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);

	ret = iommu_device_register(&smmu->iommu);
	if (ret) {
		dev_err(dev, "Failed to register iommu\n");
		return ret;
	}

#ifdef CONFIG_PCI
	if (pci_bus_type.iommu_ops != &arm_smmu_ops) {
		pci_request_acs();
		ret = bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
		if (ret)
			return ret;
	}
#endif
#ifdef CONFIG_ARM_AMBA
	if (amba_bustype.iommu_ops != &arm_smmu_ops) {
		ret = bus_set_iommu(&amba_bustype, &arm_smmu_ops);
		if (ret)
			return ret;
	}
#endif
	if (platform_bus_type.iommu_ops != &arm_smmu_ops) {
		ret = bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
		if (ret)
			return ret;
	}
	return 0;
}

static int arm_smmu_device_remove(struct platform_device *pdev)
{
	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);

	arm_smmu_device_disable(smmu);

	return 0;
}

static void arm_smmu_device_shutdown(struct platform_device *pdev)
{
	arm_smmu_device_remove(pdev);
}

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v3", },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu-v3",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_probe,
	.remove	= arm_smmu_device_remove,
	.shutdown = arm_smmu_device_shutdown,
};
module_platform_driver(arm_smmu_driver);

IOMMU_OF_DECLARE(arm_smmuv3, "arm,smmu-v3");

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");