/*
 * IOMMU API for ARM architected SMMUv3 implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2015 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver is powered by bad coffee and bombay mix.
 */

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* MMIO registers */
#define ARM_SMMU_IDR0			0x0
#define IDR0_ST_LVL_SHIFT		27
#define IDR0_ST_LVL_MASK		0x3
#define IDR0_ST_LVL_2LVL		(1 << IDR0_ST_LVL_SHIFT)
#define IDR0_STALL_MODEL_SHIFT		24
#define IDR0_STALL_MODEL_MASK		0x3
#define IDR0_STALL_MODEL_STALL		(0 << IDR0_STALL_MODEL_SHIFT)
#define IDR0_STALL_MODEL_FORCE		(2 << IDR0_STALL_MODEL_SHIFT)
#define IDR0_TTENDIAN_SHIFT		21
#define IDR0_TTENDIAN_MASK		0x3
#define IDR0_TTENDIAN_LE		(2 << IDR0_TTENDIAN_SHIFT)
#define IDR0_TTENDIAN_BE		(3 << IDR0_TTENDIAN_SHIFT)
#define IDR0_TTENDIAN_MIXED		(0 << IDR0_TTENDIAN_SHIFT)
#define IDR0_CD2L			(1 << 19)
#define IDR0_VMID16			(1 << 18)
#define IDR0_PRI			(1 << 16)
#define IDR0_SEV			(1 << 14)
#define IDR0_MSI			(1 << 13)
#define IDR0_ASID16			(1 << 12)
#define IDR0_ATS			(1 << 10)
#define IDR0_HYP			(1 << 9)
#define IDR0_COHACC			(1 << 4)
#define IDR0_TTF_SHIFT			2
#define IDR0_TTF_MASK			0x3
#define IDR0_TTF_AARCH64		(2 << IDR0_TTF_SHIFT)
#define IDR0_TTF_AARCH32_64		(3 << IDR0_TTF_SHIFT)
#define IDR0_S1P			(1 << 1)
#define IDR0_S2P			(1 << 0)

#define ARM_SMMU_IDR1			0x4
#define IDR1_TABLES_PRESET		(1 << 30)
#define IDR1_QUEUES_PRESET		(1 << 29)
#define IDR1_REL			(1 << 28)
#define IDR1_CMDQ_SHIFT			21
#define IDR1_CMDQ_MASK			0x1f
#define IDR1_EVTQ_SHIFT			16
#define IDR1_EVTQ_MASK			0x1f
#define IDR1_PRIQ_SHIFT			11
#define IDR1_PRIQ_MASK			0x1f
#define IDR1_SSID_SHIFT			6
#define IDR1_SSID_MASK			0x1f
#define IDR1_SID_SHIFT			0
#define IDR1_SID_MASK			0x3f

#define ARM_SMMU_IDR5			0x14
#define IDR5_STALL_MAX_SHIFT		16
#define IDR5_STALL_MAX_MASK		0xffff
#define IDR5_GRAN64K			(1 << 6)
#define IDR5_GRAN16K			(1 << 5)
#define IDR5_GRAN4K			(1 << 4)
#define IDR5_OAS_SHIFT			0
#define IDR5_OAS_MASK			0x7
#define IDR5_OAS_32_BIT			(0 << IDR5_OAS_SHIFT)
#define IDR5_OAS_36_BIT			(1 << IDR5_OAS_SHIFT)
#define IDR5_OAS_40_BIT			(2 << IDR5_OAS_SHIFT)
#define IDR5_OAS_42_BIT			(3 << IDR5_OAS_SHIFT)
#define IDR5_OAS_44_BIT			(4 << IDR5_OAS_SHIFT)
#define IDR5_OAS_48_BIT			(5 << IDR5_OAS_SHIFT)

#define ARM_SMMU_CR0			0x20
#define CR0_CMDQEN			(1 << 3)
#define CR0_EVTQEN			(1 << 2)
#define CR0_PRIQEN			(1 << 1)
#define CR0_SMMUEN			(1 << 0)

#define ARM_SMMU_CR0ACK			0x24

#define ARM_SMMU_CR1			0x28
#define CR1_SH_NSH			0
#define CR1_SH_OSH			2
#define CR1_SH_ISH			3
#define CR1_CACHE_NC			0
#define CR1_CACHE_WB			1
#define CR1_CACHE_WT			2
#define CR1_TABLE_SH_SHIFT		10
#define CR1_TABLE_OC_SHIFT		8
#define CR1_TABLE_IC_SHIFT		6
#define CR1_QUEUE_SH_SHIFT		4
#define CR1_QUEUE_OC_SHIFT		2
#define CR1_QUEUE_IC_SHIFT		0

#define ARM_SMMU_CR2			0x2c
#define CR2_PTM				(1 << 2)
#define CR2_RECINVSID			(1 << 1)
#define CR2_E2H				(1 << 0)

#define ARM_SMMU_GBPA			0x44
#define GBPA_ABORT			(1 << 20)
#define GBPA_UPDATE			(1 << 31)

#define ARM_SMMU_IRQ_CTRL		0x50
#define IRQ_CTRL_EVTQ_IRQEN		(1 << 2)
#define IRQ_CTRL_PRIQ_IRQEN		(1 << 1)
#define IRQ_CTRL_GERROR_IRQEN		(1 << 0)

#define ARM_SMMU_IRQ_CTRLACK		0x54

#define ARM_SMMU_GERROR			0x60
#define GERROR_SFM_ERR			(1 << 8)
#define GERROR_MSI_GERROR_ABT_ERR	(1 << 7)
#define GERROR_MSI_PRIQ_ABT_ERR		(1 << 6)
#define GERROR_MSI_EVTQ_ABT_ERR		(1 << 5)
#define GERROR_MSI_CMDQ_ABT_ERR		(1 << 4)
#define GERROR_PRIQ_ABT_ERR		(1 << 3)
#define GERROR_EVTQ_ABT_ERR		(1 << 2)
#define GERROR_CMDQ_ERR			(1 << 0)
#define GERROR_ERR_MASK			0xfd

#define ARM_SMMU_GERRORN		0x64

#define ARM_SMMU_GERROR_IRQ_CFG0	0x68
#define ARM_SMMU_GERROR_IRQ_CFG1	0x70
#define ARM_SMMU_GERROR_IRQ_CFG2	0x74

#define ARM_SMMU_STRTAB_BASE		0x80
#define STRTAB_BASE_RA			(1UL << 62)
#define STRTAB_BASE_ADDR_SHIFT		6
#define STRTAB_BASE_ADDR_MASK		0x3ffffffffffUL

#define ARM_SMMU_STRTAB_BASE_CFG	0x88
#define STRTAB_BASE_CFG_LOG2SIZE_SHIFT	0
#define STRTAB_BASE_CFG_LOG2SIZE_MASK	0x3f
#define STRTAB_BASE_CFG_SPLIT_SHIFT	6
#define STRTAB_BASE_CFG_SPLIT_MASK	0x1f
#define STRTAB_BASE_CFG_FMT_SHIFT	16
#define STRTAB_BASE_CFG_FMT_MASK	0x3
#define STRTAB_BASE_CFG_FMT_LINEAR	(0 << STRTAB_BASE_CFG_FMT_SHIFT)
#define STRTAB_BASE_CFG_FMT_2LVL	(1 << STRTAB_BASE_CFG_FMT_SHIFT)

#define ARM_SMMU_CMDQ_BASE		0x90
#define ARM_SMMU_CMDQ_PROD		0x98
#define ARM_SMMU_CMDQ_CONS		0x9c

#define ARM_SMMU_EVTQ_BASE		0xa0
#define ARM_SMMU_EVTQ_PROD		0x100a8
#define ARM_SMMU_EVTQ_CONS		0x100ac
#define ARM_SMMU_EVTQ_IRQ_CFG0		0xb0
#define ARM_SMMU_EVTQ_IRQ_CFG1		0xb8
#define ARM_SMMU_EVTQ_IRQ_CFG2		0xbc

#define ARM_SMMU_PRIQ_BASE		0xc0
#define ARM_SMMU_PRIQ_PROD		0x100c8
#define ARM_SMMU_PRIQ_CONS		0x100cc
#define ARM_SMMU_PRIQ_IRQ_CFG0		0xd0
#define ARM_SMMU_PRIQ_IRQ_CFG1		0xd8
#define ARM_SMMU_PRIQ_IRQ_CFG2		0xdc

/* Common MSI config fields */
#define MSI_CFG0_ADDR_SHIFT		2
#define MSI_CFG0_ADDR_MASK		0x3fffffffffffUL
#define MSI_CFG2_SH_SHIFT		4
#define MSI_CFG2_SH_NSH			(0UL << MSI_CFG2_SH_SHIFT)
#define MSI_CFG2_SH_OSH			(2UL << MSI_CFG2_SH_SHIFT)
#define MSI_CFG2_SH_ISH			(3UL << MSI_CFG2_SH_SHIFT)
#define MSI_CFG2_MEMATTR_SHIFT		0
#define MSI_CFG2_MEMATTR_DEVICE_nGnRE	(0x1 << MSI_CFG2_MEMATTR_SHIFT)

#define Q_IDX(q, p)			((p) & ((1 << (q)->max_n_shift) - 1))
#define Q_WRP(q, p)			((p) & (1 << (q)->max_n_shift))
#define Q_OVERFLOW_FLAG			(1 << 31)
#define Q_OVF(q, p)			((p) & Q_OVERFLOW_FLAG)
#define Q_ENT(q, p)			((q)->base +			\
					 Q_IDX(q, p) * (q)->ent_dwords)

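/*
 * Layout of a queue pointer (prod or cons) as decoded by Q_IDX/Q_WRP/Q_OVF
 * above: bits [max_n_shift-1:0] hold the entry index, bit [max_n_shift] is
 * the wrap flag that toggles on each pass over the queue, and bit 31 is the
 * overflow flag the SMMU sets when it has to discard entries. For example,
 * with max_n_shift == 8, a prod value of 0x100 points at entry 0 with the
 * wrap flag set.
 */
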
#define Q_BASE_RWA			(1UL << 62)
#define Q_BASE_ADDR_SHIFT		5
#define Q_BASE_ADDR_MASK		0xfffffffffffUL
#define Q_BASE_LOG2SIZE_SHIFT		0
#define Q_BASE_LOG2SIZE_MASK		0x1fUL

/*
 * Stream table.
 *
 * Linear: Enough to cover 1 << IDR1.SIDSIZE entries
 * 2lvl: 128k L1 entries,
 *       256 lazy entries per table (each table covers a PCI bus)
 */
#define STRTAB_L1_SZ_SHIFT		20
#define STRTAB_SPLIT			8

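/*
 * The numbers in the comment above follow from the layout: L1 descriptors
 * are STRTAB_L1_DESC_DWORDS (8 bytes) each, so a 1 << STRTAB_L1_SZ_SHIFT
 * byte L1 table holds 1 << (20 - 3) = 128k descriptors, and STRTAB_SPLIT
 * == 8 means each descriptor spans 1 << 8 = 256 SIDs -- the SID space of
 * one PCI bus.
 */
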
#define STRTAB_L1_DESC_DWORDS		1
#define STRTAB_L1_DESC_SPAN_SHIFT	0
#define STRTAB_L1_DESC_SPAN_MASK	0x1fUL
#define STRTAB_L1_DESC_L2PTR_SHIFT	6
#define STRTAB_L1_DESC_L2PTR_MASK	0x3ffffffffffUL

#define STRTAB_STE_DWORDS		8
#define STRTAB_STE_0_V			(1UL << 0)
#define STRTAB_STE_0_CFG_SHIFT		1
#define STRTAB_STE_0_CFG_MASK		0x7UL
#define STRTAB_STE_0_CFG_ABORT		(0UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_BYPASS		(4UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_S1_TRANS	(5UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_S2_TRANS	(6UL << STRTAB_STE_0_CFG_SHIFT)

#define STRTAB_STE_0_S1FMT_SHIFT	4
#define STRTAB_STE_0_S1FMT_LINEAR	(0UL << STRTAB_STE_0_S1FMT_SHIFT)
#define STRTAB_STE_0_S1CTXPTR_SHIFT	6
#define STRTAB_STE_0_S1CTXPTR_MASK	0x3ffffffffffUL
#define STRTAB_STE_0_S1CDMAX_SHIFT	59
#define STRTAB_STE_0_S1CDMAX_MASK	0x1fUL

#define STRTAB_STE_1_S1C_CACHE_NC	0UL
#define STRTAB_STE_1_S1C_CACHE_WBRA	1UL
#define STRTAB_STE_1_S1C_CACHE_WT	2UL
#define STRTAB_STE_1_S1C_CACHE_WB	3UL
#define STRTAB_STE_1_S1C_SH_NSH		0UL
#define STRTAB_STE_1_S1C_SH_OSH		2UL
#define STRTAB_STE_1_S1C_SH_ISH		3UL
#define STRTAB_STE_1_S1CIR_SHIFT	2
#define STRTAB_STE_1_S1COR_SHIFT	4
#define STRTAB_STE_1_S1CSH_SHIFT	6

#define STRTAB_STE_1_S1STALLD		(1UL << 27)

#define STRTAB_STE_1_EATS_ABT		0UL
#define STRTAB_STE_1_EATS_TRANS		1UL
#define STRTAB_STE_1_EATS_S1CHK		2UL
#define STRTAB_STE_1_EATS_SHIFT		28

#define STRTAB_STE_1_STRW_NSEL1		0UL
#define STRTAB_STE_1_STRW_EL2		2UL
#define STRTAB_STE_1_STRW_SHIFT		30

#define STRTAB_STE_1_SHCFG_INCOMING	1UL
#define STRTAB_STE_1_SHCFG_SHIFT	44

#define STRTAB_STE_2_S2VMID_SHIFT	0
#define STRTAB_STE_2_S2VMID_MASK	0xffffUL
#define STRTAB_STE_2_VTCR_SHIFT		32
#define STRTAB_STE_2_VTCR_MASK		0x7ffffUL
#define STRTAB_STE_2_S2AA64		(1UL << 51)
#define STRTAB_STE_2_S2ENDI		(1UL << 52)
#define STRTAB_STE_2_S2PTW		(1UL << 54)
#define STRTAB_STE_2_S2R		(1UL << 58)

#define STRTAB_STE_3_S2TTB_SHIFT	4
#define STRTAB_STE_3_S2TTB_MASK		0xfffffffffffUL

/* Context descriptor (stage-1 only) */
#define CTXDESC_CD_DWORDS		8
#define CTXDESC_CD_0_TCR_T0SZ_SHIFT	0
#define ARM64_TCR_T0SZ_SHIFT		0
#define ARM64_TCR_T0SZ_MASK		0x1fUL
#define CTXDESC_CD_0_TCR_TG0_SHIFT	6
#define ARM64_TCR_TG0_SHIFT		14
#define ARM64_TCR_TG0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_IRGN0_SHIFT	8
#define ARM64_TCR_IRGN0_SHIFT		8
#define ARM64_TCR_IRGN0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_ORGN0_SHIFT	10
#define ARM64_TCR_ORGN0_SHIFT		10
#define ARM64_TCR_ORGN0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_SH0_SHIFT	12
#define ARM64_TCR_SH0_SHIFT		12
#define ARM64_TCR_SH0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_EPD0_SHIFT	14
#define ARM64_TCR_EPD0_SHIFT		7
#define ARM64_TCR_EPD0_MASK		0x1UL
#define CTXDESC_CD_0_TCR_EPD1_SHIFT	30
#define ARM64_TCR_EPD1_SHIFT		23
#define ARM64_TCR_EPD1_MASK		0x1UL

#define CTXDESC_CD_0_ENDI		(1UL << 15)
#define CTXDESC_CD_0_V			(1UL << 31)

#define CTXDESC_CD_0_TCR_IPS_SHIFT	32
#define ARM64_TCR_IPS_SHIFT		32
#define ARM64_TCR_IPS_MASK		0x7UL
#define CTXDESC_CD_0_TCR_TBI0_SHIFT	38
#define ARM64_TCR_TBI0_SHIFT		37
#define ARM64_TCR_TBI0_MASK		0x1UL

#define CTXDESC_CD_0_AA64		(1UL << 41)
#define CTXDESC_CD_0_S			(1UL << 44)
#define CTXDESC_CD_0_R			(1UL << 45)
#define CTXDESC_CD_0_A			(1UL << 46)
#define CTXDESC_CD_0_ASET_SHIFT		47
#define CTXDESC_CD_0_ASET_SHARED	(0UL << CTXDESC_CD_0_ASET_SHIFT)
#define CTXDESC_CD_0_ASET_PRIVATE	(1UL << CTXDESC_CD_0_ASET_SHIFT)
#define CTXDESC_CD_0_ASID_SHIFT		48
#define CTXDESC_CD_0_ASID_MASK		0xffffUL

#define CTXDESC_CD_1_TTB0_SHIFT		4
#define CTXDESC_CD_1_TTB0_MASK		0xfffffffffffUL

#define CTXDESC_CD_3_MAIR_SHIFT		0

/* Convert between AArch64 (CPU) TCR format and SMMU CD format */
#define ARM_SMMU_TCR2CD(tcr, fld)					\
	(((tcr) >> ARM64_TCR_##fld##_SHIFT & ARM64_TCR_##fld##_MASK)	\
	 << CTXDESC_CD_0_TCR_##fld##_SHIFT)

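/*
 * For example, ARM_SMMU_TCR2CD(tcr, TG0) extracts the TG0 field from bits
 * [15:14] of the CPU's TCR value and re-deposits it at bit 6 of CD word 0,
 * where the SMMU expects it. Only the TTBR0-related fields are repacked;
 * see arm_smmu_cpu_tcr_to_cd().
 */
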
/* Command queue */
#define CMDQ_ENT_DWORDS			2
#define CMDQ_MAX_SZ_SHIFT		8

#define CMDQ_ERR_SHIFT			24
#define CMDQ_ERR_MASK			0x7f
#define CMDQ_ERR_CERROR_NONE_IDX	0
#define CMDQ_ERR_CERROR_ILL_IDX		1
#define CMDQ_ERR_CERROR_ABT_IDX		2

#define CMDQ_0_OP_SHIFT			0
#define CMDQ_0_OP_MASK			0xffUL
#define CMDQ_0_SSV			(1UL << 11)

#define CMDQ_PREFETCH_0_SID_SHIFT	32
#define CMDQ_PREFETCH_1_SIZE_SHIFT	0
#define CMDQ_PREFETCH_1_ADDR_MASK	~0xfffUL

#define CMDQ_CFGI_0_SID_SHIFT		32
#define CMDQ_CFGI_0_SID_MASK		0xffffffffUL
#define CMDQ_CFGI_1_LEAF		(1UL << 0)
#define CMDQ_CFGI_1_RANGE_SHIFT		0
#define CMDQ_CFGI_1_RANGE_MASK		0x1fUL

#define CMDQ_TLBI_0_VMID_SHIFT		32
#define CMDQ_TLBI_0_ASID_SHIFT		48
#define CMDQ_TLBI_1_LEAF		(1UL << 0)
#define CMDQ_TLBI_1_VA_MASK		~0xfffUL
#define CMDQ_TLBI_1_IPA_MASK		0xfffffffff000UL

#define CMDQ_PRI_0_SSID_SHIFT		12
#define CMDQ_PRI_0_SSID_MASK		0xfffffUL
#define CMDQ_PRI_0_SID_SHIFT		32
#define CMDQ_PRI_0_SID_MASK		0xffffffffUL
#define CMDQ_PRI_1_GRPID_SHIFT		0
#define CMDQ_PRI_1_GRPID_MASK		0x1ffUL
#define CMDQ_PRI_1_RESP_SHIFT		12
#define CMDQ_PRI_1_RESP_DENY		(0UL << CMDQ_PRI_1_RESP_SHIFT)
#define CMDQ_PRI_1_RESP_FAIL		(1UL << CMDQ_PRI_1_RESP_SHIFT)
#define CMDQ_PRI_1_RESP_SUCC		(2UL << CMDQ_PRI_1_RESP_SHIFT)

#define CMDQ_SYNC_0_CS_SHIFT		12
#define CMDQ_SYNC_0_CS_NONE		(0UL << CMDQ_SYNC_0_CS_SHIFT)
#define CMDQ_SYNC_0_CS_SEV		(2UL << CMDQ_SYNC_0_CS_SHIFT)

/* Event queue */
#define EVTQ_ENT_DWORDS			4
#define EVTQ_MAX_SZ_SHIFT		7

#define EVTQ_0_ID_SHIFT			0
#define EVTQ_0_ID_MASK			0xffUL

/* PRI queue */
#define PRIQ_ENT_DWORDS			2
#define PRIQ_MAX_SZ_SHIFT		8

#define PRIQ_0_SID_SHIFT		0
#define PRIQ_0_SID_MASK			0xffffffffUL
#define PRIQ_0_SSID_SHIFT		32
#define PRIQ_0_SSID_MASK		0xfffffUL
#define PRIQ_0_PERM_PRIV		(1UL << 58)
#define PRIQ_0_PERM_EXEC		(1UL << 59)
#define PRIQ_0_PERM_READ		(1UL << 60)
#define PRIQ_0_PERM_WRITE		(1UL << 61)
#define PRIQ_0_PRG_LAST			(1UL << 62)
#define PRIQ_0_SSID_V			(1UL << 63)

#define PRIQ_1_PRG_IDX_SHIFT		0
#define PRIQ_1_PRG_IDX_MASK		0x1ffUL
#define PRIQ_1_ADDR_SHIFT		12
#define PRIQ_1_ADDR_MASK		0xfffffffffffffUL

/* High-level queue structures */
#define ARM_SMMU_POLL_TIMEOUT_US	100
#define ARM_SMMU_CMDQ_DRAIN_TIMEOUT_US	1000000 /* 1s! */

#define MSI_IOVA_BASE			0x8000000
#define MSI_IOVA_LENGTH			0x100000

static bool disable_bypass;
module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum pri_resp {
	PRI_RESP_DENY,
	PRI_RESP_FAIL,
	PRI_RESP_SUCC,
};

enum arm_smmu_msi_index {
	EVTQ_MSI_INDEX,
	GERROR_MSI_INDEX,
	PRIQ_MSI_INDEX,
	ARM_SMMU_MAX_MSIS,
};

static phys_addr_t arm_smmu_msi_cfg[ARM_SMMU_MAX_MSIS][3] = {
	[EVTQ_MSI_INDEX] = {
		ARM_SMMU_EVTQ_IRQ_CFG0,
		ARM_SMMU_EVTQ_IRQ_CFG1,
		ARM_SMMU_EVTQ_IRQ_CFG2,
	},
	[GERROR_MSI_INDEX] = {
		ARM_SMMU_GERROR_IRQ_CFG0,
		ARM_SMMU_GERROR_IRQ_CFG1,
		ARM_SMMU_GERROR_IRQ_CFG2,
	},
	[PRIQ_MSI_INDEX] = {
		ARM_SMMU_PRIQ_IRQ_CFG0,
		ARM_SMMU_PRIQ_IRQ_CFG1,
		ARM_SMMU_PRIQ_IRQ_CFG2,
	},
};

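/*
 * Each row of the table above names the register triple used to program
 * one of the SMMU's outgoing MSIs: CFG0 holds the doorbell address, CFG1
 * the payload, and CFG2 the shareability/memory attributes (see the
 * MSI_CFG* field definitions earlier in this file).
 */
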
struct arm_smmu_cmdq_ent {
	/* Common fields */
	u8				opcode;
	bool				substream_valid;

	/* Command-specific fields */
	union {
		#define CMDQ_OP_PREFETCH_CFG	0x1
		struct {
			u32			sid;
			u8			size;
			u64			addr;
		} prefetch;

		#define CMDQ_OP_CFGI_STE	0x3
		#define CMDQ_OP_CFGI_ALL	0x4
		struct {
			u32			sid;
			union {
				bool		leaf;
				u8		span;
			};
		} cfgi;

		#define CMDQ_OP_TLBI_NH_ASID	0x11
		#define CMDQ_OP_TLBI_NH_VA	0x12
		#define CMDQ_OP_TLBI_EL2_ALL	0x20
		#define CMDQ_OP_TLBI_S12_VMALL	0x28
		#define CMDQ_OP_TLBI_S2_IPA	0x2a
		#define CMDQ_OP_TLBI_NSNH_ALL	0x30
		struct {
			u16			asid;
			u16			vmid;
			bool			leaf;
			u64			addr;
		} tlbi;

		#define CMDQ_OP_PRI_RESP	0x41
		struct {
			u32			sid;
			u32			ssid;
			u16			grpid;
			enum pri_resp		resp;
		} pri;

		#define CMDQ_OP_CMD_SYNC	0x46
	};
};

struct arm_smmu_queue {
	int				irq; /* Wired interrupt */

	__le64				*base;
	dma_addr_t			base_dma;
	u64				q_base;

	size_t				ent_dwords;
	u32				max_n_shift;
	u32				prod;
	u32				cons;

	u32 __iomem			*prod_reg;
	u32 __iomem			*cons_reg;
};

struct arm_smmu_cmdq {
	struct arm_smmu_queue		q;
	spinlock_t			lock;
};

struct arm_smmu_evtq {
	struct arm_smmu_queue		q;
	u32				max_stalls;
};

struct arm_smmu_priq {
	struct arm_smmu_queue		q;
};

/* High-level stream table and context descriptor structures */
struct arm_smmu_strtab_l1_desc {
	u8				span;

	__le64				*l2ptr;
	dma_addr_t			l2ptr_dma;
};

struct arm_smmu_s1_cfg {
	__le64				*cdptr;
	dma_addr_t			cdptr_dma;

	struct arm_smmu_ctx_desc {
		u16	asid;
		u64	ttbr;
		u64	tcr;
		u64	mair;
	} cd;
};

struct arm_smmu_s2_cfg {
	u16				vmid;
	u64				vttbr;
	u64				vtcr;
};

struct arm_smmu_strtab_ent {
	/*
	 * An STE is "assigned" if the master emitting the corresponding SID
	 * is attached to a domain. The behaviour of an unassigned STE is
	 * determined by the disable_bypass parameter, whereas an assigned
	 * STE behaves according to s1_cfg/s2_cfg, which themselves are
	 * configured according to the domain type.
	 */
	bool				assigned;
	struct arm_smmu_s1_cfg		*s1_cfg;
	struct arm_smmu_s2_cfg		*s2_cfg;
};

struct arm_smmu_strtab_cfg {
	__le64				*strtab;
	dma_addr_t			strtab_dma;
	struct arm_smmu_strtab_l1_desc	*l1_desc;
	unsigned int			num_l1_ents;

	u64				strtab_base;
	u32				strtab_base_cfg;
};

/* An SMMUv3 instance */
struct arm_smmu_device {
	struct device			*dev;
	void __iomem			*base;

#define ARM_SMMU_FEAT_2_LVL_STRTAB	(1 << 0)
#define ARM_SMMU_FEAT_2_LVL_CDTAB	(1 << 1)
#define ARM_SMMU_FEAT_TT_LE		(1 << 2)
#define ARM_SMMU_FEAT_TT_BE		(1 << 3)
#define ARM_SMMU_FEAT_PRI		(1 << 4)
#define ARM_SMMU_FEAT_ATS		(1 << 5)
#define ARM_SMMU_FEAT_SEV		(1 << 6)
#define ARM_SMMU_FEAT_MSI		(1 << 7)
#define ARM_SMMU_FEAT_COHERENCY		(1 << 8)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 9)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 10)
#define ARM_SMMU_FEAT_STALLS		(1 << 11)
#define ARM_SMMU_FEAT_HYP		(1 << 12)
#define ARM_SMMU_FEAT_STALL_FORCE	(1 << 13)
	u32				features;

#define ARM_SMMU_OPT_SKIP_PREFETCH	(1 << 0)
#define ARM_SMMU_OPT_PAGE0_REGS_ONLY	(1 << 1)
	u32				options;

	struct arm_smmu_cmdq		cmdq;
	struct arm_smmu_evtq		evtq;
	struct arm_smmu_priq		priq;

	int				gerr_irq;
	int				combined_irq;

	unsigned long			ias; /* IPA */
	unsigned long			oas; /* PA */
	unsigned long			pgsize_bitmap;

#define ARM_SMMU_MAX_ASIDS		(1 << 16)
	unsigned int			asid_bits;
	DECLARE_BITMAP(asid_map, ARM_SMMU_MAX_ASIDS);

#define ARM_SMMU_MAX_VMIDS		(1 << 16)
	unsigned int			vmid_bits;
	DECLARE_BITMAP(vmid_map, ARM_SMMU_MAX_VMIDS);

	unsigned int			ssid_bits;
	unsigned int			sid_bits;

	struct arm_smmu_strtab_cfg	strtab_cfg;

	/* IOMMU core code handle */
	struct iommu_device		iommu;
};

/* SMMU private data for each master */
struct arm_smmu_master_data {
	struct arm_smmu_device		*smmu;
	struct arm_smmu_strtab_ent	ste;
};

/* SMMU private data for an IOMMU domain */
enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
	ARM_SMMU_DOMAIN_BYPASS,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct mutex			init_mutex; /* Protects smmu pointer */

	struct io_pgtable_ops		*pgtbl_ops;

	enum arm_smmu_domain_stage	stage;
	union {
		struct arm_smmu_s1_cfg	s1_cfg;
		struct arm_smmu_s2_cfg	s2_cfg;
	};

	struct iommu_domain		domain;
};

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
	{ ARM_SMMU_OPT_PAGE0_REGS_ONLY, "cavium,cn9900-broken-page1-regspace"},
	{ 0, NULL},
};

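/*
 * On parts with the "cavium,cn9900-broken-page1-regspace" quirk, registers
 * that architecturally live on 64KB page 1 (e.g. ARM_SMMU_EVTQ_PROD at
 * 0x100a8) are instead found at the equivalent offset on page 0, so fold
 * the offset back down when ARM_SMMU_OPT_PAGE0_REGS_ONLY is set.
 */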
static inline void __iomem *arm_smmu_page1_fixup(unsigned long offset,
						 struct arm_smmu_device *smmu)
{
	if ((offset > SZ_64K) &&
	    (smmu->options & ARM_SMMU_OPT_PAGE0_REGS_ONLY))
		offset -= SZ_64K;

	return smmu->base + offset;
}

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

/* Low-level queue manipulation functions */
static bool queue_full(struct arm_smmu_queue *q)
{
	return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
	       Q_WRP(q, q->prod) != Q_WRP(q, q->cons);
}

static bool queue_empty(struct arm_smmu_queue *q)
{
	return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
	       Q_WRP(q, q->prod) == Q_WRP(q, q->cons);
}

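/*
 * Equal indices alone can mean either a completely empty or a completely
 * full queue, so the wrap flags disambiguate: matching wrap flags mean the
 * producer hasn't lapped the consumer (empty), differing flags mean it is
 * exactly one lap ahead (full).
 */
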
static void queue_sync_cons(struct arm_smmu_queue *q)
{
	q->cons = readl_relaxed(q->cons_reg);
}

static void queue_inc_cons(struct arm_smmu_queue *q)
{
	u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1;

	q->cons = Q_OVF(q, q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons);
	writel(q->cons, q->cons_reg);
}

static int queue_sync_prod(struct arm_smmu_queue *q)
{
	int ret = 0;
	u32 prod = readl_relaxed(q->prod_reg);

	if (Q_OVF(q, prod) != Q_OVF(q, q->prod))
		ret = -EOVERFLOW;

	q->prod = prod;
	return ret;
}

static void queue_inc_prod(struct arm_smmu_queue *q)
{
	u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + 1;

	q->prod = Q_OVF(q, q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod);
	writel(q->prod, q->prod_reg);
}

/*
 * Wait for the SMMU to consume items. If drain is true, wait until the queue
 * is empty. Otherwise, wait until there is at least one free slot.
 */
static int queue_poll_cons(struct arm_smmu_queue *q, bool drain, bool wfe)
{
	ktime_t timeout;
	unsigned int delay = 1;

	/* Wait longer if it's a queue drain */
	timeout = ktime_add_us(ktime_get(), drain ?
					    ARM_SMMU_CMDQ_DRAIN_TIMEOUT_US :
					    ARM_SMMU_POLL_TIMEOUT_US);

	while (queue_sync_cons(q), (drain ? !queue_empty(q) : queue_full(q))) {
		if (ktime_compare(ktime_get(), timeout) > 0)
			return -ETIMEDOUT;

		if (wfe) {
			wfe();
		} else {
			cpu_relax();
			udelay(delay);
			delay *= 2;
		}
	}

	return 0;
}

static void queue_write(__le64 *dst, u64 *src, size_t n_dwords)
{
	int i;

	for (i = 0; i < n_dwords; ++i)
		*dst++ = cpu_to_le64(*src++);
}

static int queue_insert_raw(struct arm_smmu_queue *q, u64 *ent)
{
	if (queue_full(q))
		return -ENOSPC;

	queue_write(Q_ENT(q, q->prod), ent, q->ent_dwords);
	queue_inc_prod(q);
	return 0;
}

static void queue_read(__le64 *dst, u64 *src, size_t n_dwords)
{
	int i;

	for (i = 0; i < n_dwords; ++i)
		*dst++ = le64_to_cpu(*src++);
}

static int queue_remove_raw(struct arm_smmu_queue *q, u64 *ent)
{
	if (queue_empty(q))
		return -EAGAIN;

	queue_read(ent, Q_ENT(q, q->cons), q->ent_dwords);
	queue_inc_cons(q);
	return 0;
}

/* High-level queue accessors */
static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
{
	memset(cmd, 0, CMDQ_ENT_DWORDS << 3);
	cmd[0] |= (ent->opcode & CMDQ_0_OP_MASK) << CMDQ_0_OP_SHIFT;

	switch (ent->opcode) {
	case CMDQ_OP_TLBI_EL2_ALL:
	case CMDQ_OP_TLBI_NSNH_ALL:
		break;
	case CMDQ_OP_PREFETCH_CFG:
		cmd[0] |= (u64)ent->prefetch.sid << CMDQ_PREFETCH_0_SID_SHIFT;
		cmd[1] |= ent->prefetch.size << CMDQ_PREFETCH_1_SIZE_SHIFT;
		cmd[1] |= ent->prefetch.addr & CMDQ_PREFETCH_1_ADDR_MASK;
		break;
	case CMDQ_OP_CFGI_STE:
		cmd[0] |= (u64)ent->cfgi.sid << CMDQ_CFGI_0_SID_SHIFT;
		cmd[1] |= ent->cfgi.leaf ? CMDQ_CFGI_1_LEAF : 0;
		break;
	case CMDQ_OP_CFGI_ALL:
		/* Cover the entire SID range */
		cmd[1] |= CMDQ_CFGI_1_RANGE_MASK << CMDQ_CFGI_1_RANGE_SHIFT;
		break;
	case CMDQ_OP_TLBI_NH_VA:
		cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
		cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
		cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK;
		break;
	case CMDQ_OP_TLBI_S2_IPA:
		cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT;
		cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
		cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_IPA_MASK;
		break;
	case CMDQ_OP_TLBI_NH_ASID:
		cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
		/* Fallthrough */
	case CMDQ_OP_TLBI_S12_VMALL:
		cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT;
		break;
	case CMDQ_OP_PRI_RESP:
		cmd[0] |= ent->substream_valid ? CMDQ_0_SSV : 0;
		cmd[0] |= ent->pri.ssid << CMDQ_PRI_0_SSID_SHIFT;
		cmd[0] |= (u64)ent->pri.sid << CMDQ_PRI_0_SID_SHIFT;
		cmd[1] |= ent->pri.grpid << CMDQ_PRI_1_GRPID_SHIFT;
		switch (ent->pri.resp) {
		case PRI_RESP_DENY:
			cmd[1] |= CMDQ_PRI_1_RESP_DENY;
			break;
		case PRI_RESP_FAIL:
			cmd[1] |= CMDQ_PRI_1_RESP_FAIL;
			break;
		case PRI_RESP_SUCC:
			cmd[1] |= CMDQ_PRI_1_RESP_SUCC;
			break;
		default:
			return -EINVAL;
		}
		break;
	case CMDQ_OP_CMD_SYNC:
		cmd[0] |= CMDQ_SYNC_0_CS_SEV;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}

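/*
 * Typical usage (see arm_smmu_sync_ste_for_sid() below): callers fill in an
 * arm_smmu_cmdq_ent on the stack and hand it to arm_smmu_cmdq_issue_cmd(),
 * which builds the two-dword command above and inserts it under cmdq.lock.
 */
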
static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
{
	static const char *cerror_str[] = {
		[CMDQ_ERR_CERROR_NONE_IDX]	= "No error",
		[CMDQ_ERR_CERROR_ILL_IDX]	= "Illegal command",
		[CMDQ_ERR_CERROR_ABT_IDX]	= "Abort on command fetch",
	};

	int i;
	u64 cmd[CMDQ_ENT_DWORDS];
	struct arm_smmu_queue *q = &smmu->cmdq.q;
	u32 cons = readl_relaxed(q->cons_reg);
	u32 idx = cons >> CMDQ_ERR_SHIFT & CMDQ_ERR_MASK;
	struct arm_smmu_cmdq_ent cmd_sync = {
		.opcode = CMDQ_OP_CMD_SYNC,
	};

	dev_err(smmu->dev, "CMDQ error (cons 0x%08x): %s\n", cons,
		idx < ARRAY_SIZE(cerror_str) ? cerror_str[idx] : "Unknown");

	switch (idx) {
	case CMDQ_ERR_CERROR_ABT_IDX:
		dev_err(smmu->dev, "retrying command fetch\n");
	case CMDQ_ERR_CERROR_NONE_IDX:
		return;
	case CMDQ_ERR_CERROR_ILL_IDX:
		/* Fallthrough */
	default:
		break;
	}

	/*
	 * We may have concurrent producers, so we need to be careful
	 * not to touch any of the shadow cmdq state.
	 */
	queue_read(cmd, Q_ENT(q, cons), q->ent_dwords);
	dev_err(smmu->dev, "skipping command in error state:\n");
	for (i = 0; i < ARRAY_SIZE(cmd); ++i)
		dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]);

	/* Convert the erroneous command into a CMD_SYNC */
	if (arm_smmu_cmdq_build_cmd(cmd, &cmd_sync)) {
		dev_err(smmu->dev, "failed to convert to CMD_SYNC\n");
		return;
	}

	queue_write(Q_ENT(q, cons), cmd, q->ent_dwords);
}

static void arm_smmu_cmdq_insert_cmd(struct arm_smmu_device *smmu, u64 *cmd)
{
	struct arm_smmu_queue *q = &smmu->cmdq.q;
	bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);

	while (queue_insert_raw(q, cmd) == -ENOSPC) {
		if (queue_poll_cons(q, false, wfe))
			dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
	}
}

static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
				    struct arm_smmu_cmdq_ent *ent)
{
	u64 cmd[CMDQ_ENT_DWORDS];
	unsigned long flags;

	if (arm_smmu_cmdq_build_cmd(cmd, ent)) {
		dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n",
			 ent->opcode);
		return;
	}

	spin_lock_irqsave(&smmu->cmdq.lock, flags);
	arm_smmu_cmdq_insert_cmd(smmu, cmd);
	spin_unlock_irqrestore(&smmu->cmdq.lock, flags);
}

static void arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
{
	u64 cmd[CMDQ_ENT_DWORDS];
	unsigned long flags;
	bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
	struct arm_smmu_cmdq_ent ent = { .opcode = CMDQ_OP_CMD_SYNC };
	int ret;

	arm_smmu_cmdq_build_cmd(cmd, &ent);

	spin_lock_irqsave(&smmu->cmdq.lock, flags);
	arm_smmu_cmdq_insert_cmd(smmu, cmd);
	ret = queue_poll_cons(&smmu->cmdq.q, true, wfe);
	spin_unlock_irqrestore(&smmu->cmdq.lock, flags);

	if (ret)
		dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout\n");
}

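/*
 * Note that CMD_SYNC completion above is detected by draining the queue
 * under cmdq.lock: queue_poll_cons() waits (using WFE when the SEV feature
 * is present) until the SMMU's consumer pointer has caught up with prod.
 */
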
/* Context descriptor manipulation functions */
static u64 arm_smmu_cpu_tcr_to_cd(u64 tcr)
{
	u64 val = 0;

	/* Repack the TCR. Just care about TTBR0 for now */
	val |= ARM_SMMU_TCR2CD(tcr, T0SZ);
	val |= ARM_SMMU_TCR2CD(tcr, TG0);
	val |= ARM_SMMU_TCR2CD(tcr, IRGN0);
	val |= ARM_SMMU_TCR2CD(tcr, ORGN0);
	val |= ARM_SMMU_TCR2CD(tcr, SH0);
	val |= ARM_SMMU_TCR2CD(tcr, EPD0);
	val |= ARM_SMMU_TCR2CD(tcr, EPD1);
	val |= ARM_SMMU_TCR2CD(tcr, IPS);
	val |= ARM_SMMU_TCR2CD(tcr, TBI0);

	return val;
}

static void arm_smmu_write_ctx_desc(struct arm_smmu_device *smmu,
				    struct arm_smmu_s1_cfg *cfg)
{
	u64 val;

	/*
	 * We don't need to issue any invalidation here, as we'll invalidate
	 * the STE when installing the new entry anyway.
	 */
	val = arm_smmu_cpu_tcr_to_cd(cfg->cd.tcr) |
#ifdef __BIG_ENDIAN
	      CTXDESC_CD_0_ENDI |
#endif
	      CTXDESC_CD_0_R | CTXDESC_CD_0_A | CTXDESC_CD_0_ASET_PRIVATE |
	      CTXDESC_CD_0_AA64 | (u64)cfg->cd.asid << CTXDESC_CD_0_ASID_SHIFT |
	      CTXDESC_CD_0_V;

	/* STALL_MODEL==0b10 && CD.S==0 is ILLEGAL */
	if (smmu->features & ARM_SMMU_FEAT_STALL_FORCE)
		val |= CTXDESC_CD_0_S;

	cfg->cdptr[0] = cpu_to_le64(val);

	val = cfg->cd.ttbr & CTXDESC_CD_1_TTB0_MASK << CTXDESC_CD_1_TTB0_SHIFT;
	cfg->cdptr[1] = cpu_to_le64(val);

	cfg->cdptr[3] = cpu_to_le64(cfg->cd.mair << CTXDESC_CD_3_MAIR_SHIFT);
}

/* Stream table manipulation functions */
static void
arm_smmu_write_strtab_l1_desc(__le64 *dst, struct arm_smmu_strtab_l1_desc *desc)
{
	u64 val = 0;

	val |= (desc->span & STRTAB_L1_DESC_SPAN_MASK)
		<< STRTAB_L1_DESC_SPAN_SHIFT;
	val |= desc->l2ptr_dma &
	       STRTAB_L1_DESC_L2PTR_MASK << STRTAB_L1_DESC_L2PTR_SHIFT;

	*dst = cpu_to_le64(val);
}

static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
{
	struct arm_smmu_cmdq_ent cmd = {
		.opcode	= CMDQ_OP_CFGI_STE,
		.cfgi	= {
			.sid	= sid,
			.leaf	= true,
		},
	};

	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	arm_smmu_cmdq_issue_sync(smmu);
}

static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
				      __le64 *dst, struct arm_smmu_strtab_ent *ste)
{
	/*
	 * This is hideously complicated, but we only really care about
	 * three cases at the moment:
	 *
	 * 1. Invalid (all zero) -> bypass/fault (init)
	 * 2. Bypass/fault -> translation/bypass (attach)
	 * 3. Translation/bypass -> bypass/fault (detach)
	 *
	 * Given that we can't update the STE atomically and the SMMU
	 * doesn't read the thing in a defined order, that leaves us
	 * with the following maintenance requirements:
	 *
	 * 1. Update Config, return (init time STEs aren't live)
	 * 2. Write everything apart from dword 0, sync, write dword 0, sync
	 * 3. Update Config, sync
	 */
	u64 val = le64_to_cpu(dst[0]);
	bool ste_live = false;
	struct arm_smmu_cmdq_ent prefetch_cmd = {
		.opcode		= CMDQ_OP_PREFETCH_CFG,
		.prefetch	= {
			.sid	= sid,
		},
	};

	if (val & STRTAB_STE_0_V) {
		u64 cfg;

		cfg = val & STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT;
		switch (cfg) {
		case STRTAB_STE_0_CFG_BYPASS:
			break;
		case STRTAB_STE_0_CFG_S1_TRANS:
		case STRTAB_STE_0_CFG_S2_TRANS:
			ste_live = true;
			break;
		case STRTAB_STE_0_CFG_ABORT:
			if (disable_bypass)
				break;
		default:
			BUG(); /* STE corruption */
		}
	}

	/* Nuke the existing STE_0 value, as we're going to rewrite it */
	val = STRTAB_STE_0_V;

	/* Bypass/fault */
	if (!ste->assigned || !(ste->s1_cfg || ste->s2_cfg)) {
		if (!ste->assigned && disable_bypass)
			val |= STRTAB_STE_0_CFG_ABORT;
		else
			val |= STRTAB_STE_0_CFG_BYPASS;

		dst[0] = cpu_to_le64(val);
		dst[1] = cpu_to_le64(STRTAB_STE_1_SHCFG_INCOMING
			 << STRTAB_STE_1_SHCFG_SHIFT);
		dst[2] = 0; /* Nuke the VMID */
		/*
		 * The SMMU can perform negative caching, so we must sync
		 * the STE regardless of whether the old value was live.
		 */
		if (smmu)
			arm_smmu_sync_ste_for_sid(smmu, sid);
		return;
	}

	if (ste->s1_cfg) {
		BUG_ON(ste_live);
		dst[1] = cpu_to_le64(
			 STRTAB_STE_1_S1C_CACHE_WBRA
			 << STRTAB_STE_1_S1CIR_SHIFT |
			 STRTAB_STE_1_S1C_CACHE_WBRA
			 << STRTAB_STE_1_S1COR_SHIFT |
			 STRTAB_STE_1_S1C_SH_ISH << STRTAB_STE_1_S1CSH_SHIFT |
#ifdef CONFIG_PCI_ATS
			 STRTAB_STE_1_EATS_TRANS << STRTAB_STE_1_EATS_SHIFT |
#endif
			 STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT);

		if (smmu->features & ARM_SMMU_FEAT_STALLS &&
		   !(smmu->features & ARM_SMMU_FEAT_STALL_FORCE))
			dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);

		val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK
			<< STRTAB_STE_0_S1CTXPTR_SHIFT) |
			STRTAB_STE_0_CFG_S1_TRANS;
	}

	if (ste->s2_cfg) {
		BUG_ON(ste_live);
		dst[2] = cpu_to_le64(
			 ste->s2_cfg->vmid << STRTAB_STE_2_S2VMID_SHIFT |
			 (ste->s2_cfg->vtcr & STRTAB_STE_2_VTCR_MASK)
			  << STRTAB_STE_2_VTCR_SHIFT |
#ifdef __BIG_ENDIAN
			 STRTAB_STE_2_S2ENDI |
#endif
			 STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2AA64 |
			 STRTAB_STE_2_S2R);

		dst[3] = cpu_to_le64(ste->s2_cfg->vttbr &
			 STRTAB_STE_3_S2TTB_MASK << STRTAB_STE_3_S2TTB_SHIFT);

		val |= STRTAB_STE_0_CFG_S2_TRANS;
	}

	arm_smmu_sync_ste_for_sid(smmu, sid);
	dst[0] = cpu_to_le64(val);
	arm_smmu_sync_ste_for_sid(smmu, sid);

	/* It's likely that we'll want to use the new STE soon */
	if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH))
		arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
}

static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent)
{
	unsigned int i;
	struct arm_smmu_strtab_ent ste = { .assigned = false };

	for (i = 0; i < nent; ++i) {
		arm_smmu_write_strtab_ent(NULL, -1, strtab, &ste);
		strtab += STRTAB_STE_DWORDS;
	}
}

static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
{
	size_t size;
	void *strtab;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
	struct arm_smmu_strtab_l1_desc *desc = &cfg->l1_desc[sid >> STRTAB_SPLIT];

	if (desc->l2ptr)
		return 0;

	size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3);
	strtab = &cfg->strtab[(sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS];

	desc->span = STRTAB_SPLIT + 1;
	desc->l2ptr = dmam_alloc_coherent(smmu->dev, size, &desc->l2ptr_dma,
					  GFP_KERNEL | __GFP_ZERO);
	if (!desc->l2ptr) {
		dev_err(smmu->dev,
			"failed to allocate l2 stream table for SID %u\n",
			sid);
		return -ENOMEM;
	}

	arm_smmu_init_bypass_stes(desc->l2ptr, 1 << STRTAB_SPLIT);
	arm_smmu_write_strtab_l1_desc(strtab, desc);
	return 0;
}

/* IRQ and event handlers */
static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
{
	int i;
	struct arm_smmu_device *smmu = dev;
	struct arm_smmu_queue *q = &smmu->evtq.q;
	u64 evt[EVTQ_ENT_DWORDS];

	do {
		while (!queue_remove_raw(q, evt)) {
			u8 id = evt[0] >> EVTQ_0_ID_SHIFT & EVTQ_0_ID_MASK;

			dev_info(smmu->dev, "event 0x%02x received:\n", id);
			for (i = 0; i < ARRAY_SIZE(evt); ++i)
				dev_info(smmu->dev, "\t0x%016llx\n",
					 (unsigned long long)evt[i]);
		}

		/*
		 * Not much we can do on overflow, so scream and pretend we're
		 * trying harder.
		 */
		if (queue_sync_prod(q) == -EOVERFLOW)
			dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n");
	} while (!queue_empty(q));

	/* Sync our overflow flag, as we believe we're up to speed */
	q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
	return IRQ_HANDLED;
}

static void arm_smmu_handle_ppr(struct arm_smmu_device *smmu, u64 *evt)
{
	u32 sid, ssid;
	u16 grpid;
	bool ssv, last;

	sid = evt[0] >> PRIQ_0_SID_SHIFT & PRIQ_0_SID_MASK;
	ssv = evt[0] & PRIQ_0_SSID_V;
	ssid = ssv ? evt[0] >> PRIQ_0_SSID_SHIFT & PRIQ_0_SSID_MASK : 0;
	last = evt[0] & PRIQ_0_PRG_LAST;
	grpid = evt[1] >> PRIQ_1_PRG_IDX_SHIFT & PRIQ_1_PRG_IDX_MASK;

	dev_info(smmu->dev, "unexpected PRI request received:\n");
	dev_info(smmu->dev,
		 "\tsid 0x%08x.0x%05x: [%u%s] %sprivileged %s%s%s access at iova 0x%016llx\n",
		 sid, ssid, grpid, last ? "L" : "",
		 evt[0] & PRIQ_0_PERM_PRIV ? "" : "un",
		 evt[0] & PRIQ_0_PERM_READ ? "R" : "",
		 evt[0] & PRIQ_0_PERM_WRITE ? "W" : "",
		 evt[0] & PRIQ_0_PERM_EXEC ? "X" : "",
		 evt[1] & PRIQ_1_ADDR_MASK << PRIQ_1_ADDR_SHIFT);

	if (last) {
		struct arm_smmu_cmdq_ent cmd = {
			.opcode			= CMDQ_OP_PRI_RESP,
			.substream_valid	= ssv,
			.pri			= {
				.sid	= sid,
				.ssid	= ssid,
				.grpid	= grpid,
				.resp	= PRI_RESP_DENY,
			},
		};

		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	}
}

static irqreturn_t arm_smmu_priq_thread(int irq, void *dev)
{
	struct arm_smmu_device *smmu = dev;
	struct arm_smmu_queue *q = &smmu->priq.q;
	u64 evt[PRIQ_ENT_DWORDS];

	do {
		while (!queue_remove_raw(q, evt))
			arm_smmu_handle_ppr(smmu, evt);

		if (queue_sync_prod(q) == -EOVERFLOW)
			dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n");
	} while (!queue_empty(q));

	/* Sync our overflow flag, as we believe we're up to speed */
	q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_cmdq_sync_handler(int irq, void *dev)
{
	/* We don't actually use CMD_SYNC interrupts for anything */
	return IRQ_HANDLED;
}

static int arm_smmu_device_disable(struct arm_smmu_device *smmu);

static irqreturn_t arm_smmu_gerror_handler(int irq, void *dev)
{
	u32 gerror, gerrorn, active;
	struct arm_smmu_device *smmu = dev;

	gerror = readl_relaxed(smmu->base + ARM_SMMU_GERROR);
	gerrorn = readl_relaxed(smmu->base + ARM_SMMU_GERRORN);

	active = gerror ^ gerrorn;
	if (!(active & GERROR_ERR_MASK))
		return IRQ_NONE; /* No errors pending */

	dev_warn(smmu->dev,
		 "unexpected global error reported (0x%08x), this could be serious\n",
		 active);

	if (active & GERROR_SFM_ERR) {
		dev_err(smmu->dev, "device has entered Service Failure Mode!\n");
		arm_smmu_device_disable(smmu);
	}

	if (active & GERROR_MSI_GERROR_ABT_ERR)
		dev_warn(smmu->dev, "GERROR MSI write aborted\n");

	if (active & GERROR_MSI_PRIQ_ABT_ERR)
		dev_warn(smmu->dev, "PRIQ MSI write aborted\n");

	if (active & GERROR_MSI_EVTQ_ABT_ERR)
		dev_warn(smmu->dev, "EVTQ MSI write aborted\n");

	if (active & GERROR_MSI_CMDQ_ABT_ERR) {
		dev_warn(smmu->dev, "CMDQ MSI write aborted\n");
		arm_smmu_cmdq_sync_handler(irq, smmu->dev);
	}

	if (active & GERROR_PRIQ_ABT_ERR)
		dev_err(smmu->dev, "PRIQ write aborted -- events may have been lost\n");

	if (active & GERROR_EVTQ_ABT_ERR)
		dev_err(smmu->dev, "EVTQ write aborted -- events may have been lost\n");

	if (active & GERROR_CMDQ_ERR)
		arm_smmu_cmdq_skip_err(smmu);

	writel(gerror, smmu->base + ARM_SMMU_GERRORN);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_combined_irq_thread(int irq, void *dev)
{
	struct arm_smmu_device *smmu = dev;

	arm_smmu_evtq_thread(irq, dev);
	if (smmu->features & ARM_SMMU_FEAT_PRI)
		arm_smmu_priq_thread(irq, dev);

	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_combined_irq_handler(int irq, void *dev)
{
	arm_smmu_gerror_handler(irq, dev);
	arm_smmu_cmdq_sync_handler(irq, dev);
	return IRQ_WAKE_THREAD;
}

/* IO_PGTABLE API */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	arm_smmu_cmdq_issue_sync(smmu);
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cmdq_ent cmd;

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		cmd.opcode	= CMDQ_OP_TLBI_NH_ASID;
		cmd.tlbi.asid	= smmu_domain->s1_cfg.cd.asid;
		cmd.tlbi.vmid	= 0;
	} else {
		cmd.opcode	= CMDQ_OP_TLBI_S12_VMALL;
		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
	}

	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	__arm_smmu_tlb_sync(smmu);
}

static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cmdq_ent cmd = {
		.tlbi = {
			.leaf	= leaf,
			.addr	= iova,
		},
	};

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		cmd.opcode	= CMDQ_OP_TLBI_NH_VA;
		cmd.tlbi.asid	= smmu_domain->s1_cfg.cd.asid;
	} else {
		cmd.opcode	= CMDQ_OP_TLBI_S2_IPA;
		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
	}

	do {
		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
		cmd.tlbi.addr += granule;
	} while (size -= granule);
}

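/*
 * The gather ops below follow the io-pgtable contract: tlb_add_flush only
 * queues invalidation commands, and tlb_sync (a CMD_SYNC via
 * arm_smmu_tlb_sync()) makes them visible before the page-table code
 * reuses the freed IOVAs.
 */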
static const struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};

/* IOMMU API */
static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		return true;
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED &&
	    type != IOMMU_DOMAIN_DMA &&
	    type != IOMMU_DOMAIN_IDENTITY)
		return NULL;

	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&smmu_domain->domain)) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	return &smmu_domain->domain;
}

static int arm_smmu_bitmap_alloc(unsigned long *map, int span)
{
	int idx, size = 1 << span;

	do {
		idx = find_first_zero_bit(map, size);
		if (idx == size)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void arm_smmu_bitmap_free(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	iommu_put_dma_cookie(domain);
	free_io_pgtable_ops(smmu_domain->pgtbl_ops);

	/* Free the CD and ASID, if we allocated them */
	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;

		if (cfg->cdptr) {
			dmam_free_coherent(smmu_domain->smmu->dev,
					   CTXDESC_CD_DWORDS << 3,
					   cfg->cdptr,
					   cfg->cdptr_dma);

			arm_smmu_bitmap_free(smmu->asid_map, cfg->cd.asid);
		}
	} else {
		struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
		if (cfg->vmid)
			arm_smmu_bitmap_free(smmu->vmid_map, cfg->vmid);
	}

	kfree(smmu_domain);
}

static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	int ret;
	int asid;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;

	asid = arm_smmu_bitmap_alloc(smmu->asid_map, smmu->asid_bits);
	if (asid < 0)
		return asid;

	cfg->cdptr = dmam_alloc_coherent(smmu->dev, CTXDESC_CD_DWORDS << 3,
					 &cfg->cdptr_dma,
					 GFP_KERNEL | __GFP_ZERO);
	if (!cfg->cdptr) {
		dev_warn(smmu->dev, "failed to allocate context descriptor\n");
		ret = -ENOMEM;
		goto out_free_asid;
	}

	cfg->cd.asid	= (u16)asid;
	cfg->cd.ttbr	= pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
	cfg->cd.tcr	= pgtbl_cfg->arm_lpae_s1_cfg.tcr;
	cfg->cd.mair	= pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
	return 0;

out_free_asid:
	arm_smmu_bitmap_free(smmu->asid_map, asid);
	return ret;
}

static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	int vmid;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;

	vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits);
	if (vmid < 0)
		return vmid;

	cfg->vmid	= (u16)vmid;
	cfg->vttbr	= pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
	cfg->vtcr	= pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	return 0;
}

Will Deacon48ec83b2015-05-27 17:25:59 +01001576static int arm_smmu_domain_finalise(struct iommu_domain *domain)
1577{
1578 int ret;
1579 unsigned long ias, oas;
1580 enum io_pgtable_fmt fmt;
1581 struct io_pgtable_cfg pgtbl_cfg;
1582 struct io_pgtable_ops *pgtbl_ops;
1583 int (*finalise_stage_fn)(struct arm_smmu_domain *,
1584 struct io_pgtable_cfg *);
1585 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1586 struct arm_smmu_device *smmu = smmu_domain->smmu;
1587
Will Deaconbeb3c6a2017-01-06 16:27:30 +00001588 if (domain->type == IOMMU_DOMAIN_IDENTITY) {
1589 smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
1590 return 0;
1591 }
1592
Will Deacon48ec83b2015-05-27 17:25:59 +01001593 /* Restrict the stage to what we can actually support */
1594 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
1595 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
1596 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
1597 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1598
1599 switch (smmu_domain->stage) {
1600 case ARM_SMMU_DOMAIN_S1:
1601 ias = VA_BITS;
1602 oas = smmu->ias;
1603 fmt = ARM_64_LPAE_S1;
1604 finalise_stage_fn = arm_smmu_domain_finalise_s1;
1605 break;
1606 case ARM_SMMU_DOMAIN_NESTED:
1607 case ARM_SMMU_DOMAIN_S2:
1608 ias = smmu->ias;
1609 oas = smmu->oas;
1610 fmt = ARM_64_LPAE_S2;
1611 finalise_stage_fn = arm_smmu_domain_finalise_s2;
1612 break;
1613 default:
1614 return -EINVAL;
1615 }
1616
1617 pgtbl_cfg = (struct io_pgtable_cfg) {
Robin Murphyd5466352016-05-09 17:20:09 +01001618 .pgsize_bitmap = smmu->pgsize_bitmap,
Will Deacon48ec83b2015-05-27 17:25:59 +01001619 .ias = ias,
1620 .oas = oas,
1621 .tlb = &arm_smmu_gather_ops,
Robin Murphybdc6d972015-07-29 19:46:07 +01001622 .iommu_dev = smmu->dev,
Will Deacon48ec83b2015-05-27 17:25:59 +01001623 };
1624
Robin Murphy81b3c252017-06-22 16:53:53 +01001625 if (smmu->features & ARM_SMMU_FEAT_COHERENCY)
1626 pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA;
1627
Will Deacon48ec83b2015-05-27 17:25:59 +01001628 pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
1629 if (!pgtbl_ops)
1630 return -ENOMEM;
1631
Robin Murphyd5466352016-05-09 17:20:09 +01001632 domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
Robin Murphy455eb7d2016-09-12 17:13:58 +01001633 domain->geometry.aperture_end = (1UL << ias) - 1;
1634 domain->geometry.force_aperture = true;
Will Deacon48ec83b2015-05-27 17:25:59 +01001635 smmu_domain->pgtbl_ops = pgtbl_ops;
1636
1637 ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
Arnd Bergmann287980e2016-05-27 23:23:25 +02001638 if (ret < 0)
Will Deacon48ec83b2015-05-27 17:25:59 +01001639 free_io_pgtable_ops(pgtbl_ops);
1640
1641 return ret;
1642}
1643
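/*
 * Return a pointer to the STE for @sid. For a two-level stream table, the
 * L1 descriptor index is sid >> STRTAB_SPLIT and the low STRTAB_SPLIT bits
 * index into the L2 chunk; a linear table is indexed by sid directly.
 */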
Will Deacon48ec83b2015-05-27 17:25:59 +01001644static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
1645{
1646 __le64 *step;
1647 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
1648
1649 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
1650 struct arm_smmu_strtab_l1_desc *l1_desc;
1651 int idx;
1652
1653 /* Two-level walk */
1654 idx = (sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS;
1655 l1_desc = &cfg->l1_desc[idx];
1656 idx = (sid & ((1 << STRTAB_SPLIT) - 1)) * STRTAB_STE_DWORDS;
1657 step = &l1_desc->l2ptr[idx];
1658 } else {
1659 /* Simple linear lookup */
1660 step = &cfg->strtab[sid * STRTAB_STE_DWORDS];
1661 }
1662
1663 return step;
1664}
1665
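/* Write the master's current STE into every stream table slot it owns. */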
Will Deacon67560ed2017-03-01 21:11:29 +00001666static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
Will Deacon48ec83b2015-05-27 17:25:59 +01001667{
1668 int i;
Robin Murphy8f785152016-09-12 17:13:45 +01001669 struct arm_smmu_master_data *master = fwspec->iommu_priv;
1670 struct arm_smmu_device *smmu = master->smmu;
Will Deacon48ec83b2015-05-27 17:25:59 +01001671
Robin Murphy8f785152016-09-12 17:13:45 +01001672 for (i = 0; i < fwspec->num_ids; ++i) {
1673 u32 sid = fwspec->ids[i];
Will Deacon48ec83b2015-05-27 17:25:59 +01001674 __le64 *step = arm_smmu_get_step_for_sid(smmu, sid);
1675
Robin Murphy8f785152016-09-12 17:13:45 +01001676 arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste);
Will Deacon48ec83b2015-05-27 17:25:59 +01001677 }
Will Deacon48ec83b2015-05-27 17:25:59 +01001678}
1679
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001680static void arm_smmu_detach_dev(struct device *dev)
1681{
Robin Murphy8f785152016-09-12 17:13:45 +01001682 struct arm_smmu_master_data *master = dev->iommu_fwspec->iommu_priv;
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001683
Will Deaconbeb3c6a2017-01-06 16:27:30 +00001684 master->ste.assigned = false;
Will Deacon67560ed2017-03-01 21:11:29 +00001685 arm_smmu_install_ste_for_dev(dev->iommu_fwspec);
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001686}
1687
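/*
 * Attaching a device finalises the domain on first use, then points the
 * device's STE(s) at the stage-1 or stage-2 config (or neither, for
 * bypass) before reinstalling them in the stream table.
 */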
Will Deacon48ec83b2015-05-27 17:25:59 +01001688static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1689{
1690 int ret = 0;
1691 struct arm_smmu_device *smmu;
1692 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Robin Murphy8f785152016-09-12 17:13:45 +01001693 struct arm_smmu_master_data *master;
1694 struct arm_smmu_strtab_ent *ste;
Will Deacon48ec83b2015-05-27 17:25:59 +01001695
Robin Murphy8f785152016-09-12 17:13:45 +01001696 if (!dev->iommu_fwspec)
Will Deacon48ec83b2015-05-27 17:25:59 +01001697 return -ENOENT;
1698
Robin Murphy8f785152016-09-12 17:13:45 +01001699 master = dev->iommu_fwspec->iommu_priv;
1700 smmu = master->smmu;
1701 ste = &master->ste;
1702
Will Deacon48ec83b2015-05-27 17:25:59 +01001703 /* Already attached to a different domain? */
Will Deaconbeb3c6a2017-01-06 16:27:30 +00001704 if (ste->assigned)
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001705 arm_smmu_detach_dev(dev);
Will Deacon48ec83b2015-05-27 17:25:59 +01001706
Will Deacon48ec83b2015-05-27 17:25:59 +01001707 mutex_lock(&smmu_domain->init_mutex);
1708
1709 if (!smmu_domain->smmu) {
1710 smmu_domain->smmu = smmu;
1711 ret = arm_smmu_domain_finalise(domain);
1712 if (ret) {
1713 smmu_domain->smmu = NULL;
1714 goto out_unlock;
1715 }
1716 } else if (smmu_domain->smmu != smmu) {
1717 dev_err(dev,
1718 "cannot attach to SMMU %s (upstream of %s)\n",
1719 dev_name(smmu_domain->smmu->dev),
1720 dev_name(smmu->dev));
1721 ret = -ENXIO;
1722 goto out_unlock;
1723 }
1724
Will Deaconbeb3c6a2017-01-06 16:27:30 +00001725 ste->assigned = true;
Will Deacon48ec83b2015-05-27 17:25:59 +01001726
Will Deaconbeb3c6a2017-01-06 16:27:30 +00001727 if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS) {
1728 ste->s1_cfg = NULL;
1729 ste->s2_cfg = NULL;
1730 } else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
Robin Murphy8f785152016-09-12 17:13:45 +01001731 ste->s1_cfg = &smmu_domain->s1_cfg;
1732 ste->s2_cfg = NULL;
1733 arm_smmu_write_ctx_desc(smmu, ste->s1_cfg);
1734 } else {
1735 ste->s1_cfg = NULL;
1736 ste->s2_cfg = &smmu_domain->s2_cfg;
1737 }
Will Deaconcbf82772016-02-18 12:05:57 +00001738
Will Deacon67560ed2017-03-01 21:11:29 +00001739 arm_smmu_install_ste_for_dev(dev->iommu_fwspec);
Will Deacon48ec83b2015-05-27 17:25:59 +01001740out_unlock:
1741 mutex_unlock(&smmu_domain->init_mutex);
1742 return ret;
1743}
1744
Will Deacon48ec83b2015-05-27 17:25:59 +01001745static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
1746 phys_addr_t paddr, size_t size, int prot)
1747{
Robin Murphy58188af2017-06-22 16:53:57 +01001748 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
Will Deacon48ec83b2015-05-27 17:25:59 +01001749
1750 if (!ops)
1751 return -ENODEV;
1752
Robin Murphy58188af2017-06-22 16:53:57 +01001753 return ops->map(ops, iova, paddr, size, prot);
Will Deacon48ec83b2015-05-27 17:25:59 +01001754}
1755
1756static size_t
1757arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
1758{
Robin Murphy58188af2017-06-22 16:53:57 +01001759 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
Will Deacon48ec83b2015-05-27 17:25:59 +01001760
1761 if (!ops)
1762 return 0;
1763
Robin Murphy58188af2017-06-22 16:53:57 +01001764 return ops->unmap(ops, iova, size);
Will Deacon48ec83b2015-05-27 17:25:59 +01001765}
1766
1767static phys_addr_t
1768arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
1769{
Robin Murphy58188af2017-06-22 16:53:57 +01001770 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
Will Deacon48ec83b2015-05-27 17:25:59 +01001771
Sunil Gouthambdf95922017-04-25 15:27:52 +05301772 if (domain->type == IOMMU_DOMAIN_IDENTITY)
1773 return iova;
1774
Will Deacon48ec83b2015-05-27 17:25:59 +01001775 if (!ops)
1776 return 0;
1777
Robin Murphy58188af2017-06-22 16:53:57 +01001778 return ops->iova_to_phys(ops, iova);
Will Deacon48ec83b2015-05-27 17:25:59 +01001779}
1780
Robin Murphy8f785152016-09-12 17:13:45 +01001781static struct platform_driver arm_smmu_driver;
1782
1783static int arm_smmu_match_node(struct device *dev, void *data)
Will Deacon48ec83b2015-05-27 17:25:59 +01001784{
Lorenzo Pieralisi778de072016-11-21 10:01:38 +00001785 return dev->fwnode == data;
Will Deacon48ec83b2015-05-27 17:25:59 +01001786}
1787
Lorenzo Pieralisi778de072016-11-21 10:01:38 +00001788static
1789struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
Will Deacon48ec83b2015-05-27 17:25:59 +01001790{
Robin Murphy8f785152016-09-12 17:13:45 +01001791 struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
Lorenzo Pieralisi778de072016-11-21 10:01:38 +00001792 fwnode, arm_smmu_match_node);
Robin Murphy8f785152016-09-12 17:13:45 +01001793 put_device(dev);
1794 return dev ? dev_get_drvdata(dev) : NULL;
Will Deacon48ec83b2015-05-27 17:25:59 +01001795}
1796
1797static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
1798{
1799 unsigned long limit = smmu->strtab_cfg.num_l1_ents;
1800
1801 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
1802 limit *= 1UL << STRTAB_SPLIT;
1803
1804 return sid < limit;
1805}
1806
Robin Murphy8f785152016-09-12 17:13:45 +01001807static struct iommu_ops arm_smmu_ops;
1808
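/*
 * Called for each device on an SMMU-mastered bus: look up the SMMU
 * instance from the firmware node, allocate per-master state, check that
 * every StreamID is in range and lazily initialise any L2 stream table
 * chunks those IDs require.
 */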
Will Deacon48ec83b2015-05-27 17:25:59 +01001809static int arm_smmu_add_device(struct device *dev)
1810{
1811 int i, ret;
Will Deacon48ec83b2015-05-27 17:25:59 +01001812 struct arm_smmu_device *smmu;
Robin Murphy8f785152016-09-12 17:13:45 +01001813 struct arm_smmu_master_data *master;
1814 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
1815 struct iommu_group *group;
Will Deacon48ec83b2015-05-27 17:25:59 +01001816
Robin Murphy8f785152016-09-12 17:13:45 +01001817 if (!fwspec || fwspec->ops != &arm_smmu_ops)
Will Deacon48ec83b2015-05-27 17:25:59 +01001818 return -ENODEV;
Robin Murphy8f785152016-09-12 17:13:45 +01001819 /*
1820 * We _can_ actually withstand dodgy bus code re-calling add_device()
1821 * without an intervening remove_device()/of_xlate() sequence, but
1822 * we're not going to do so quietly...
1823 */
1824 if (WARN_ON_ONCE(fwspec->iommu_priv)) {
1825 master = fwspec->iommu_priv;
1826 smmu = master->smmu;
Will Deacon48ec83b2015-05-27 17:25:59 +01001827 } else {
Lorenzo Pieralisi778de072016-11-21 10:01:38 +00001828 smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
Robin Murphy8f785152016-09-12 17:13:45 +01001829 if (!smmu)
1830 return -ENODEV;
1831 master = kzalloc(sizeof(*master), GFP_KERNEL);
1832 if (!master)
1833 return -ENOMEM;
1834
1835 master->smmu = smmu;
1836 fwspec->iommu_priv = master;
Will Deacon48ec83b2015-05-27 17:25:59 +01001837 }
1838
Robin Murphy8f785152016-09-12 17:13:45 +01001839 /* Check the SIDs are in range of the SMMU and our stream table */
1840 for (i = 0; i < fwspec->num_ids; i++) {
1841 u32 sid = fwspec->ids[i];
1842
1843 if (!arm_smmu_sid_in_range(smmu, sid))
1844 return -ERANGE;
1845
1846 /* Ensure l2 strtab is initialised */
1847 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
1848 ret = arm_smmu_init_l2_strtab(smmu, sid);
1849 if (ret)
1850 return ret;
1851 }
Will Deacon48ec83b2015-05-27 17:25:59 +01001852 }
1853
Robin Murphy8f785152016-09-12 17:13:45 +01001854 group = iommu_group_get_for_dev(dev);
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001855 if (!IS_ERR(group)) {
Robin Murphy8f785152016-09-12 17:13:45 +01001856 iommu_group_put(group);
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001857 iommu_device_link(&smmu->iommu, dev);
1858 }
Will Deacon48ec83b2015-05-27 17:25:59 +01001859
Robin Murphy8f785152016-09-12 17:13:45 +01001860 return PTR_ERR_OR_ZERO(group);
Will Deacon48ec83b2015-05-27 17:25:59 +01001861}
1862
1863static void arm_smmu_remove_device(struct device *dev)
1864{
Robin Murphy8f785152016-09-12 17:13:45 +01001865 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
1866 struct arm_smmu_master_data *master;
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001867 struct arm_smmu_device *smmu;
Robin Murphy8f785152016-09-12 17:13:45 +01001868
1869 if (!fwspec || fwspec->ops != &arm_smmu_ops)
1870 return;
1871
1872	master = fwspec->iommu_priv;
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001873	smmu = master->smmu;
Will Deaconbeb3c6a2017-01-06 16:27:30 +00001874	if (master->ste.assigned)
Robin Murphy8f785152016-09-12 17:13:45 +01001875		arm_smmu_detach_dev(dev);
Will Deacon48ec83b2015-05-27 17:25:59 +01001876 iommu_group_remove_device(dev);
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001877 iommu_device_unlink(&smmu->iommu, dev);
Robin Murphy8f785152016-09-12 17:13:45 +01001878 kfree(master);
1879 iommu_fwspec_free(dev);
Will Deacon48ec83b2015-05-27 17:25:59 +01001880}
1881
Robin Murphy08d4ca22016-09-12 17:13:46 +01001882static struct iommu_group *arm_smmu_device_group(struct device *dev)
1883{
1884 struct iommu_group *group;
1885
1886 /*
1887 * We don't support devices sharing stream IDs other than PCI RID
1888 * aliases, since the necessary ID-to-device lookup becomes rather
1889 * impractical given a potential sparse 32-bit stream ID space.
1890 */
1891 if (dev_is_pci(dev))
1892 group = pci_device_group(dev);
1893 else
1894 group = generic_device_group(dev);
1895
1896 return group;
1897}
1898
Will Deacon48ec83b2015-05-27 17:25:59 +01001899static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
1900 enum iommu_attr attr, void *data)
1901{
1902 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1903
Will Deacon0834cc22017-01-06 16:28:17 +00001904 if (domain->type != IOMMU_DOMAIN_UNMANAGED)
1905 return -EINVAL;
1906
Will Deacon48ec83b2015-05-27 17:25:59 +01001907 switch (attr) {
1908 case DOMAIN_ATTR_NESTING:
1909 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
1910 return 0;
1911 default:
1912 return -ENODEV;
1913 }
1914}
1915
1916static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
1917 enum iommu_attr attr, void *data)
1918{
1919 int ret = 0;
1920 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1921
Will Deacon0834cc22017-01-06 16:28:17 +00001922 if (domain->type != IOMMU_DOMAIN_UNMANAGED)
1923 return -EINVAL;
1924
Will Deacon48ec83b2015-05-27 17:25:59 +01001925 mutex_lock(&smmu_domain->init_mutex);
1926
1927 switch (attr) {
1928 case DOMAIN_ATTR_NESTING:
1929 if (smmu_domain->smmu) {
1930 ret = -EPERM;
1931 goto out_unlock;
1932 }
1933
1934 if (*(int *)data)
1935 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1936 else
1937 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1938
1939 break;
1940 default:
1941 ret = -ENODEV;
1942 }
1943
1944out_unlock:
1945 mutex_unlock(&smmu_domain->init_mutex);
1946 return ret;
1947}
1948
Robin Murphy8f785152016-09-12 17:13:45 +01001949static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
1950{
Robin Murphy8f785152016-09-12 17:13:45 +01001951 return iommu_fwspec_add_ids(dev, args->args, 1);
1952}
1953
Eric Auger50019f02017-01-19 20:57:56 +00001954static void arm_smmu_get_resv_regions(struct device *dev,
1955 struct list_head *head)
1956{
1957 struct iommu_resv_region *region;
1958 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
1959
1960 region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
Robin Murphy9d3a4de2017-03-16 17:00:16 +00001961 prot, IOMMU_RESV_SW_MSI);
Eric Auger50019f02017-01-19 20:57:56 +00001962 if (!region)
1963 return;
1964
1965 list_add_tail(&region->list, head);
Robin Murphy273df962017-03-16 17:00:19 +00001966
1967 iommu_dma_get_resv_regions(dev, head);
Eric Auger50019f02017-01-19 20:57:56 +00001968}
1969
1970static void arm_smmu_put_resv_regions(struct device *dev,
1971 struct list_head *head)
1972{
1973 struct iommu_resv_region *entry, *next;
1974
1975 list_for_each_entry_safe(entry, next, head, list)
1976 kfree(entry);
1977}
1978
Will Deacon48ec83b2015-05-27 17:25:59 +01001979static struct iommu_ops arm_smmu_ops = {
1980 .capable = arm_smmu_capable,
1981 .domain_alloc = arm_smmu_domain_alloc,
1982 .domain_free = arm_smmu_domain_free,
1983 .attach_dev = arm_smmu_attach_dev,
Will Deacon48ec83b2015-05-27 17:25:59 +01001984 .map = arm_smmu_map,
1985 .unmap = arm_smmu_unmap,
Jean-Philippe Brucker9aeb26c2016-06-03 11:50:30 +01001986 .map_sg = default_iommu_map_sg,
Will Deacon48ec83b2015-05-27 17:25:59 +01001987 .iova_to_phys = arm_smmu_iova_to_phys,
1988 .add_device = arm_smmu_add_device,
1989 .remove_device = arm_smmu_remove_device,
Robin Murphy08d4ca22016-09-12 17:13:46 +01001990 .device_group = arm_smmu_device_group,
Will Deacon48ec83b2015-05-27 17:25:59 +01001991 .domain_get_attr = arm_smmu_domain_get_attr,
1992 .domain_set_attr = arm_smmu_domain_set_attr,
Robin Murphy8f785152016-09-12 17:13:45 +01001993 .of_xlate = arm_smmu_of_xlate,
Eric Auger50019f02017-01-19 20:57:56 +00001994 .get_resv_regions = arm_smmu_get_resv_regions,
1995 .put_resv_regions = arm_smmu_put_resv_regions,
Will Deacon48ec83b2015-05-27 17:25:59 +01001996 .pgsize_bitmap = -1UL, /* Restricted during device attach */
1997};
1998
1999/* Probing and initialisation functions */
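/*
 * Allocate one circular queue and precompute its base register value.
 * The queue occupies (1 << max_n_shift) entries of @dwords 64-bit words
 * each.
 */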
2000static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
2001 struct arm_smmu_queue *q,
2002 unsigned long prod_off,
2003 unsigned long cons_off,
2004 size_t dwords)
2005{
2006 size_t qsz = ((1 << q->max_n_shift) * dwords) << 3;
2007
Will Deacon04fa26c2015-10-30 18:12:41 +00002008 q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma, GFP_KERNEL);
Will Deacon48ec83b2015-05-27 17:25:59 +01002009 if (!q->base) {
2010 dev_err(smmu->dev, "failed to allocate queue (0x%zx bytes)\n",
2011 qsz);
2012 return -ENOMEM;
2013 }
2014
Linu Cheriane5b829d2017-06-22 17:35:37 +05302015 q->prod_reg = arm_smmu_page1_fixup(prod_off, smmu);
2016 q->cons_reg = arm_smmu_page1_fixup(cons_off, smmu);
Will Deacon48ec83b2015-05-27 17:25:59 +01002017 q->ent_dwords = dwords;
2018
2019 q->q_base = Q_BASE_RWA;
2020 q->q_base |= q->base_dma & Q_BASE_ADDR_MASK << Q_BASE_ADDR_SHIFT;
2021 q->q_base |= (q->max_n_shift & Q_BASE_LOG2SIZE_MASK)
2022 << Q_BASE_LOG2SIZE_SHIFT;
2023
2024 q->prod = q->cons = 0;
2025 return 0;
2026}
2027
Will Deacon48ec83b2015-05-27 17:25:59 +01002028static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
2029{
2030 int ret;
2031
2032 /* cmdq */
2033 spin_lock_init(&smmu->cmdq.lock);
2034 ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, ARM_SMMU_CMDQ_PROD,
2035 ARM_SMMU_CMDQ_CONS, CMDQ_ENT_DWORDS);
2036 if (ret)
Will Deacon04fa26c2015-10-30 18:12:41 +00002037 return ret;
Will Deacon48ec83b2015-05-27 17:25:59 +01002038
2039 /* evtq */
2040 ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, ARM_SMMU_EVTQ_PROD,
2041 ARM_SMMU_EVTQ_CONS, EVTQ_ENT_DWORDS);
2042 if (ret)
Will Deacon04fa26c2015-10-30 18:12:41 +00002043 return ret;
Will Deacon48ec83b2015-05-27 17:25:59 +01002044
2045 /* priq */
2046 if (!(smmu->features & ARM_SMMU_FEAT_PRI))
2047 return 0;
2048
Will Deacon04fa26c2015-10-30 18:12:41 +00002049 return arm_smmu_init_one_queue(smmu, &smmu->priq.q, ARM_SMMU_PRIQ_PROD,
2050 ARM_SMMU_PRIQ_CONS, PRIQ_ENT_DWORDS);
Will Deacon48ec83b2015-05-27 17:25:59 +01002051}
2052
2053static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu)
2054{
2055 unsigned int i;
2056 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
2057 size_t size = sizeof(*cfg->l1_desc) * cfg->num_l1_ents;
2058 void *strtab = smmu->strtab_cfg.strtab;
2059
2060 cfg->l1_desc = devm_kzalloc(smmu->dev, size, GFP_KERNEL);
2061 if (!cfg->l1_desc) {
2062 dev_err(smmu->dev, "failed to allocate l1 stream table desc\n");
2063 return -ENOMEM;
2064 }
2065
2066 for (i = 0; i < cfg->num_l1_ents; ++i) {
2067 arm_smmu_write_strtab_l1_desc(strtab, &cfg->l1_desc[i]);
2068 strtab += STRTAB_L1_DESC_DWORDS << 3;
2069 }
2070
2071 return 0;
2072}
2073
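/*
 * Two-level stream table: only the L1 descriptor array is allocated up
 * front (capped by SIDSIZE); L2 chunks of 1 << STRTAB_SPLIT STEs are
 * allocated on demand via arm_smmu_init_l2_strtab() as devices appear.
 */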
2074static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
2075{
2076 void *strtab;
2077 u64 reg;
Will Deacond2e88e72015-06-30 10:02:28 +01002078 u32 size, l1size;
Will Deacon48ec83b2015-05-27 17:25:59 +01002079 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
2080
Nate Watterson692c4e42017-01-10 14:47:13 -05002081 /* Calculate the L1 size, capped to the SIDSIZE. */
2082 size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
2083 size = min(size, smmu->sid_bits - STRTAB_SPLIT);
Will Deacond2e88e72015-06-30 10:02:28 +01002084 cfg->num_l1_ents = 1 << size;
2085
2086 size += STRTAB_SPLIT;
2087 if (size < smmu->sid_bits)
Will Deacon48ec83b2015-05-27 17:25:59 +01002088 dev_warn(smmu->dev,
2089 "2-level strtab only covers %u/%u bits of SID\n",
Will Deacond2e88e72015-06-30 10:02:28 +01002090 size, smmu->sid_bits);
Will Deacon48ec83b2015-05-27 17:25:59 +01002091
Will Deacond2e88e72015-06-30 10:02:28 +01002092 l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);
Will Deacon04fa26c2015-10-30 18:12:41 +00002093 strtab = dmam_alloc_coherent(smmu->dev, l1size, &cfg->strtab_dma,
2094 GFP_KERNEL | __GFP_ZERO);
Will Deacon48ec83b2015-05-27 17:25:59 +01002095 if (!strtab) {
2096 dev_err(smmu->dev,
2097 "failed to allocate l1 stream table (%u bytes)\n",
2098			l1size);
2099 return -ENOMEM;
2100 }
2101 cfg->strtab = strtab;
2102
2103 /* Configure strtab_base_cfg for 2 levels */
2104 reg = STRTAB_BASE_CFG_FMT_2LVL;
2105 reg |= (size & STRTAB_BASE_CFG_LOG2SIZE_MASK)
2106 << STRTAB_BASE_CFG_LOG2SIZE_SHIFT;
2107 reg |= (STRTAB_SPLIT & STRTAB_BASE_CFG_SPLIT_MASK)
2108 << STRTAB_BASE_CFG_SPLIT_SHIFT;
2109 cfg->strtab_base_cfg = reg;
2110
Will Deacon04fa26c2015-10-30 18:12:41 +00002111 return arm_smmu_init_l1_strtab(smmu);
Will Deacon48ec83b2015-05-27 17:25:59 +01002112}
2113
2114static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
2115{
2116 void *strtab;
2117 u64 reg;
2118 u32 size;
2119 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
2120
2121 size = (1 << smmu->sid_bits) * (STRTAB_STE_DWORDS << 3);
Will Deacon04fa26c2015-10-30 18:12:41 +00002122 strtab = dmam_alloc_coherent(smmu->dev, size, &cfg->strtab_dma,
2123 GFP_KERNEL | __GFP_ZERO);
Will Deacon48ec83b2015-05-27 17:25:59 +01002124 if (!strtab) {
2125 dev_err(smmu->dev,
2126 "failed to allocate linear stream table (%u bytes)\n",
2127 size);
2128 return -ENOMEM;
2129 }
2130 cfg->strtab = strtab;
2131 cfg->num_l1_ents = 1 << smmu->sid_bits;
2132
2133 /* Configure strtab_base_cfg for a linear table covering all SIDs */
2134 reg = STRTAB_BASE_CFG_FMT_LINEAR;
2135 reg |= (smmu->sid_bits & STRTAB_BASE_CFG_LOG2SIZE_MASK)
2136 << STRTAB_BASE_CFG_LOG2SIZE_SHIFT;
2137 cfg->strtab_base_cfg = reg;
2138
2139 arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents);
2140 return 0;
2141}
2142
2143static int arm_smmu_init_strtab(struct arm_smmu_device *smmu)
2144{
2145 u64 reg;
2146 int ret;
2147
2148 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
2149 ret = arm_smmu_init_strtab_2lvl(smmu);
2150 else
2151 ret = arm_smmu_init_strtab_linear(smmu);
2152
2153 if (ret)
2154 return ret;
2155
2156 /* Set the strtab base address */
2157 reg = smmu->strtab_cfg.strtab_dma &
2158 STRTAB_BASE_ADDR_MASK << STRTAB_BASE_ADDR_SHIFT;
2159 reg |= STRTAB_BASE_RA;
2160 smmu->strtab_cfg.strtab_base = reg;
2161
2162 /* Allocate the first VMID for stage-2 bypass STEs */
2163 set_bit(0, smmu->vmid_map);
2164 return 0;
2165}
2166
Will Deacon48ec83b2015-05-27 17:25:59 +01002167static int arm_smmu_init_structures(struct arm_smmu_device *smmu)
2168{
2169 int ret;
2170
2171 ret = arm_smmu_init_queues(smmu);
2172 if (ret)
2173 return ret;
2174
Will Deacon04fa26c2015-10-30 18:12:41 +00002175 return arm_smmu_init_strtab(smmu);
Will Deacon48ec83b2015-05-27 17:25:59 +01002176}
2177
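/*
 * Update a control register and spin until the hardware reflects the new
 * value in the corresponding acknowledgement register (e.g. CR0/CR0ACK).
 */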
2178static int arm_smmu_write_reg_sync(struct arm_smmu_device *smmu, u32 val,
2179 unsigned int reg_off, unsigned int ack_off)
2180{
2181 u32 reg;
2182
2183 writel_relaxed(val, smmu->base + reg_off);
2184 return readl_relaxed_poll_timeout(smmu->base + ack_off, reg, reg == val,
2185 1, ARM_SMMU_POLL_TIMEOUT_US);
2186}
2187
Robin Murphydc87a982016-09-12 17:13:44 +01002188/* GBPA is "special" */
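/*
 * There is no ACK register: poll GBPA.UPDATE until it reads back as zero,
 * write the new fields with UPDATE set, then poll again for the hardware
 * to complete the update.
 */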
2189static int arm_smmu_update_gbpa(struct arm_smmu_device *smmu, u32 set, u32 clr)
2190{
2191 int ret;
2192 u32 reg, __iomem *gbpa = smmu->base + ARM_SMMU_GBPA;
2193
2194 ret = readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
2195 1, ARM_SMMU_POLL_TIMEOUT_US);
2196 if (ret)
2197 return ret;
2198
2199 reg &= ~clr;
2200 reg |= set;
2201 writel_relaxed(reg | GBPA_UPDATE, gbpa);
2202 return readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
2203 1, ARM_SMMU_POLL_TIMEOUT_US);
2204}
2205
Marc Zyngier166bdbd2015-10-13 18:32:30 +01002206static void arm_smmu_free_msis(void *data)
2207{
2208 struct device *dev = data;
2209 platform_msi_domain_free_irqs(dev);
2210}
2211
2212static void arm_smmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
2213{
2214 phys_addr_t doorbell;
2215 struct device *dev = msi_desc_to_dev(desc);
2216 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
2217 phys_addr_t *cfg = arm_smmu_msi_cfg[desc->platform.msi_index];
2218
2219 doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo;
2220 doorbell &= MSI_CFG0_ADDR_MASK << MSI_CFG0_ADDR_SHIFT;
2221
2222 writeq_relaxed(doorbell, smmu->base + cfg[0]);
2223 writel_relaxed(msg->data, smmu->base + cfg[1]);
2224 writel_relaxed(MSI_CFG2_MEMATTR_DEVICE_nGnRE, smmu->base + cfg[2]);
2225}
2226
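/*
 * Where the hardware supports MSIs, wire the evtq, gerror and priq
 * interrupts up as platform MSIs; the doorbell address and payload for
 * each index are programmed back via arm_smmu_write_msi_msg().
 */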
2227static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
2228{
2229 struct msi_desc *desc;
2230 int ret, nvec = ARM_SMMU_MAX_MSIS;
2231 struct device *dev = smmu->dev;
2232
2233 /* Clear the MSI address regs */
2234 writeq_relaxed(0, smmu->base + ARM_SMMU_GERROR_IRQ_CFG0);
2235 writeq_relaxed(0, smmu->base + ARM_SMMU_EVTQ_IRQ_CFG0);
2236
2237 if (smmu->features & ARM_SMMU_FEAT_PRI)
2238 writeq_relaxed(0, smmu->base + ARM_SMMU_PRIQ_IRQ_CFG0);
2239 else
2240 nvec--;
2241
2242 if (!(smmu->features & ARM_SMMU_FEAT_MSI))
2243 return;
2244
2245 /* Allocate MSIs for evtq, gerror and priq. Ignore cmdq */
2246 ret = platform_msi_domain_alloc_irqs(dev, nvec, arm_smmu_write_msi_msg);
2247 if (ret) {
2248 dev_warn(dev, "failed to allocate MSIs\n");
2249 return;
2250 }
2251
2252 for_each_msi_entry(desc, dev) {
2253 switch (desc->platform.msi_index) {
2254 case EVTQ_MSI_INDEX:
2255 smmu->evtq.q.irq = desc->irq;
2256 break;
2257 case GERROR_MSI_INDEX:
2258 smmu->gerr_irq = desc->irq;
2259 break;
2260 case PRIQ_MSI_INDEX:
2261 smmu->priq.q.irq = desc->irq;
2262 break;
2263 default: /* Unknown */
2264 continue;
2265 }
2266 }
2267
2268 /* Add callback to free MSIs on teardown */
2269 devm_add_action(dev, arm_smmu_free_msis, dev);
2270}
2271
Geetha Sowjanyaf9354482017-06-23 19:04:36 +05302272static void arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu)
Will Deacon48ec83b2015-05-27 17:25:59 +01002273{
Geetha Sowjanyaf9354482017-06-23 19:04:36 +05302274 int irq, ret;
Will Deacon48ec83b2015-05-27 17:25:59 +01002275
Marc Zyngier166bdbd2015-10-13 18:32:30 +01002276 arm_smmu_setup_msis(smmu);
Will Deacon48ec83b2015-05-27 17:25:59 +01002277
Marc Zyngier166bdbd2015-10-13 18:32:30 +01002278 /* Request interrupt lines */
Will Deacon48ec83b2015-05-27 17:25:59 +01002279 irq = smmu->evtq.q.irq;
2280 if (irq) {
Jean-Philippe Bruckerb4163fb2016-08-22 14:42:24 +01002281 ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
Will Deacon48ec83b2015-05-27 17:25:59 +01002282 arm_smmu_evtq_thread,
Jean-Philippe Bruckerb4163fb2016-08-22 14:42:24 +01002283 IRQF_ONESHOT,
2284 "arm-smmu-v3-evtq", smmu);
Arnd Bergmann287980e2016-05-27 23:23:25 +02002285 if (ret < 0)
Will Deacon48ec83b2015-05-27 17:25:59 +01002286 dev_warn(smmu->dev, "failed to enable evtq irq\n");
2287 }
2288
2289 irq = smmu->cmdq.q.irq;
2290 if (irq) {
2291 ret = devm_request_irq(smmu->dev, irq,
2292 arm_smmu_cmdq_sync_handler, 0,
2293 "arm-smmu-v3-cmdq-sync", smmu);
Arnd Bergmann287980e2016-05-27 23:23:25 +02002294 if (ret < 0)
Will Deacon48ec83b2015-05-27 17:25:59 +01002295 dev_warn(smmu->dev, "failed to enable cmdq-sync irq\n");
2296 }
2297
2298 irq = smmu->gerr_irq;
2299 if (irq) {
2300 ret = devm_request_irq(smmu->dev, irq, arm_smmu_gerror_handler,
2301 0, "arm-smmu-v3-gerror", smmu);
Arnd Bergmann287980e2016-05-27 23:23:25 +02002302 if (ret < 0)
Will Deacon48ec83b2015-05-27 17:25:59 +01002303 dev_warn(smmu->dev, "failed to enable gerror irq\n");
2304 }
2305
2306 if (smmu->features & ARM_SMMU_FEAT_PRI) {
Will Deacon48ec83b2015-05-27 17:25:59 +01002307 irq = smmu->priq.q.irq;
2308 if (irq) {
Jean-Philippe Bruckerb4163fb2016-08-22 14:42:24 +01002309 ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
Will Deacon48ec83b2015-05-27 17:25:59 +01002310 arm_smmu_priq_thread,
Jean-Philippe Bruckerb4163fb2016-08-22 14:42:24 +01002311 IRQF_ONESHOT,
2312 "arm-smmu-v3-priq",
Will Deacon48ec83b2015-05-27 17:25:59 +01002313 smmu);
Arnd Bergmann287980e2016-05-27 23:23:25 +02002314 if (ret < 0)
Will Deacon48ec83b2015-05-27 17:25:59 +01002315 dev_warn(smmu->dev,
2316 "failed to enable priq irq\n");
2317 }
2318 }
Geetha Sowjanyaf9354482017-06-23 19:04:36 +05302319}
2320
2321static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
2322{
2323 int ret, irq;
2324 u32 irqen_flags = IRQ_CTRL_EVTQ_IRQEN | IRQ_CTRL_GERROR_IRQEN;
2325
2326 /* Disable IRQs first */
2327 ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_IRQ_CTRL,
2328 ARM_SMMU_IRQ_CTRLACK);
2329 if (ret) {
2330 dev_err(smmu->dev, "failed to disable irqs\n");
2331 return ret;
2332 }
2333
2334 irq = smmu->combined_irq;
2335 if (irq) {
2336 /*
2337		 * The Cavium ThunderX2 implementation doesn't support unique
2338		 * irq lines. Use a single irq line for all the SMMUv3 interrupts.
2339 */
2340 ret = devm_request_threaded_irq(smmu->dev, irq,
2341 arm_smmu_combined_irq_handler,
2342 arm_smmu_combined_irq_thread,
2343 IRQF_ONESHOT,
2344 "arm-smmu-v3-combined-irq", smmu);
2345 if (ret < 0)
2346 dev_warn(smmu->dev, "failed to enable combined irq\n");
2347	} else {
2348		arm_smmu_setup_unique_irqs(smmu);
	}
2349
2350 if (smmu->features & ARM_SMMU_FEAT_PRI)
2351 irqen_flags |= IRQ_CTRL_PRIQ_IRQEN;
Will Deacon48ec83b2015-05-27 17:25:59 +01002352
2353 /* Enable interrupt generation on the SMMU */
Marc Zyngierccd63852015-07-15 11:55:18 +01002354 ret = arm_smmu_write_reg_sync(smmu, irqen_flags,
Will Deacon48ec83b2015-05-27 17:25:59 +01002355 ARM_SMMU_IRQ_CTRL, ARM_SMMU_IRQ_CTRLACK);
2356 if (ret)
2357 dev_warn(smmu->dev, "failed to enable irqs\n");
2358
2359 return 0;
2360}
2361
2362static int arm_smmu_device_disable(struct arm_smmu_device *smmu)
2363{
2364 int ret;
2365
2366 ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_CR0, ARM_SMMU_CR0ACK);
2367 if (ret)
2368 dev_err(smmu->dev, "failed to clear cr0\n");
2369
2370 return ret;
2371}
2372
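/*
 * Bring the SMMU up from an unknown state: disable it, set the table and
 * queue memory attributes, install the stream table and queues,
 * invalidate stale configuration and TLB entries, enable the queues and
 * interrupts, then either enable translation or arrange global bypass.
 */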
Robin Murphydc87a982016-09-12 17:13:44 +01002373static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
Will Deacon48ec83b2015-05-27 17:25:59 +01002374{
2375 int ret;
2376 u32 reg, enables;
2377 struct arm_smmu_cmdq_ent cmd;
2378
2379 /* Clear CR0 and sync (disables SMMU and queue processing) */
2380 reg = readl_relaxed(smmu->base + ARM_SMMU_CR0);
2381 if (reg & CR0_SMMUEN)
2382 dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");
2383
2384 ret = arm_smmu_device_disable(smmu);
2385 if (ret)
2386 return ret;
2387
2388 /* CR1 (table and queue memory attributes) */
2389 reg = (CR1_SH_ISH << CR1_TABLE_SH_SHIFT) |
2390 (CR1_CACHE_WB << CR1_TABLE_OC_SHIFT) |
2391 (CR1_CACHE_WB << CR1_TABLE_IC_SHIFT) |
2392 (CR1_SH_ISH << CR1_QUEUE_SH_SHIFT) |
2393 (CR1_CACHE_WB << CR1_QUEUE_OC_SHIFT) |
2394 (CR1_CACHE_WB << CR1_QUEUE_IC_SHIFT);
2395 writel_relaxed(reg, smmu->base + ARM_SMMU_CR1);
2396
2397	/* CR2 (PTM, invalid-SID recording, E2H) */
2398 reg = CR2_PTM | CR2_RECINVSID | CR2_E2H;
2399 writel_relaxed(reg, smmu->base + ARM_SMMU_CR2);
2400
2401 /* Stream table */
2402 writeq_relaxed(smmu->strtab_cfg.strtab_base,
2403 smmu->base + ARM_SMMU_STRTAB_BASE);
2404 writel_relaxed(smmu->strtab_cfg.strtab_base_cfg,
2405 smmu->base + ARM_SMMU_STRTAB_BASE_CFG);
2406
2407 /* Command queue */
2408 writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE);
2409 writel_relaxed(smmu->cmdq.q.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
2410 writel_relaxed(smmu->cmdq.q.cons, smmu->base + ARM_SMMU_CMDQ_CONS);
2411
2412 enables = CR0_CMDQEN;
2413 ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
2414 ARM_SMMU_CR0ACK);
2415 if (ret) {
2416 dev_err(smmu->dev, "failed to enable command queue\n");
2417 return ret;
2418 }
2419
2420 /* Invalidate any cached configuration */
2421 cmd.opcode = CMDQ_OP_CFGI_ALL;
2422 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
Robin Murphy2f657ad2017-08-31 14:44:25 +01002423 arm_smmu_cmdq_issue_sync(smmu);
Will Deacon48ec83b2015-05-27 17:25:59 +01002424
2425 /* Invalidate any stale TLB entries */
2426 if (smmu->features & ARM_SMMU_FEAT_HYP) {
2427 cmd.opcode = CMDQ_OP_TLBI_EL2_ALL;
2428 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
2429 }
2430
2431 cmd.opcode = CMDQ_OP_TLBI_NSNH_ALL;
2432 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
Robin Murphy2f657ad2017-08-31 14:44:25 +01002433 arm_smmu_cmdq_issue_sync(smmu);
Will Deacon48ec83b2015-05-27 17:25:59 +01002434
2435 /* Event queue */
2436 writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE);
Linu Cheriane5b829d2017-06-22 17:35:37 +05302437 writel_relaxed(smmu->evtq.q.prod,
2438 arm_smmu_page1_fixup(ARM_SMMU_EVTQ_PROD, smmu));
2439 writel_relaxed(smmu->evtq.q.cons,
2440 arm_smmu_page1_fixup(ARM_SMMU_EVTQ_CONS, smmu));
Will Deacon48ec83b2015-05-27 17:25:59 +01002441
2442 enables |= CR0_EVTQEN;
2443 ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
2444 ARM_SMMU_CR0ACK);
2445 if (ret) {
2446 dev_err(smmu->dev, "failed to enable event queue\n");
2447 return ret;
2448 }
2449
2450 /* PRI queue */
2451 if (smmu->features & ARM_SMMU_FEAT_PRI) {
2452 writeq_relaxed(smmu->priq.q.q_base,
2453 smmu->base + ARM_SMMU_PRIQ_BASE);
2454 writel_relaxed(smmu->priq.q.prod,
Linu Cheriane5b829d2017-06-22 17:35:37 +05302455 arm_smmu_page1_fixup(ARM_SMMU_PRIQ_PROD, smmu));
Will Deacon48ec83b2015-05-27 17:25:59 +01002456 writel_relaxed(smmu->priq.q.cons,
Linu Cheriane5b829d2017-06-22 17:35:37 +05302457 arm_smmu_page1_fixup(ARM_SMMU_PRIQ_CONS, smmu));
Will Deacon48ec83b2015-05-27 17:25:59 +01002458
2459 enables |= CR0_PRIQEN;
2460 ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
2461 ARM_SMMU_CR0ACK);
2462 if (ret) {
2463 dev_err(smmu->dev, "failed to enable PRI queue\n");
2464 return ret;
2465 }
2466 }
2467
2468 ret = arm_smmu_setup_irqs(smmu);
2469 if (ret) {
2470 dev_err(smmu->dev, "failed to setup irqs\n");
2471 return ret;
2472 }
2473
2475 /* Enable the SMMU interface, or ensure bypass */
2476 if (!bypass || disable_bypass) {
2477 enables |= CR0_SMMUEN;
2478 } else {
2479 ret = arm_smmu_update_gbpa(smmu, 0, GBPA_ABORT);
2480 if (ret) {
2481 dev_err(smmu->dev, "GBPA not responding to update\n");
2482 return ret;
2483 }
2484 }
Will Deacon48ec83b2015-05-27 17:25:59 +01002485 ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
2486 ARM_SMMU_CR0ACK);
2487 if (ret) {
2488 dev_err(smmu->dev, "failed to enable SMMU interface\n");
2489 return ret;
2490 }
2491
2492 return 0;
2493}
2494
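/*
 * Read the IDR registers and cache everything the rest of the driver
 * needs: feature flags, ASID/VMID/SID/SSID widths, queue size limits,
 * supported page sizes and the input/output address sizes.
 */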
Lorenzo Pieralisi2985b522016-11-21 10:01:42 +00002495static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
Will Deacon48ec83b2015-05-27 17:25:59 +01002496{
2497 u32 reg;
Lorenzo Pieralisi2985b522016-11-21 10:01:42 +00002498 bool coherent = smmu->features & ARM_SMMU_FEAT_COHERENCY;
Will Deacon48ec83b2015-05-27 17:25:59 +01002499
2500 /* IDR0 */
2501 reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0);
2502
2503 /* 2-level structures */
2504 if ((reg & IDR0_ST_LVL_MASK << IDR0_ST_LVL_SHIFT) == IDR0_ST_LVL_2LVL)
2505 smmu->features |= ARM_SMMU_FEAT_2_LVL_STRTAB;
2506
2507 if (reg & IDR0_CD2L)
2508 smmu->features |= ARM_SMMU_FEAT_2_LVL_CDTAB;
2509
2510 /*
2511 * Translation table endianness.
2512 * We currently require the same endianness as the CPU, but this
2513 * could be changed later by adding a new IO_PGTABLE_QUIRK.
2514 */
2515 switch (reg & IDR0_TTENDIAN_MASK << IDR0_TTENDIAN_SHIFT) {
2516 case IDR0_TTENDIAN_MIXED:
2517 smmu->features |= ARM_SMMU_FEAT_TT_LE | ARM_SMMU_FEAT_TT_BE;
2518 break;
2519#ifdef __BIG_ENDIAN
2520 case IDR0_TTENDIAN_BE:
2521 smmu->features |= ARM_SMMU_FEAT_TT_BE;
2522 break;
2523#else
2524 case IDR0_TTENDIAN_LE:
2525 smmu->features |= ARM_SMMU_FEAT_TT_LE;
2526 break;
2527#endif
2528 default:
2529 dev_err(smmu->dev, "unknown/unsupported TT endianness!\n");
2530 return -ENXIO;
2531 }
2532
2533 /* Boolean feature flags */
2534 if (IS_ENABLED(CONFIG_PCI_PRI) && reg & IDR0_PRI)
2535 smmu->features |= ARM_SMMU_FEAT_PRI;
2536
2537 if (IS_ENABLED(CONFIG_PCI_ATS) && reg & IDR0_ATS)
2538 smmu->features |= ARM_SMMU_FEAT_ATS;
2539
2540 if (reg & IDR0_SEV)
2541 smmu->features |= ARM_SMMU_FEAT_SEV;
2542
2543 if (reg & IDR0_MSI)
2544 smmu->features |= ARM_SMMU_FEAT_MSI;
2545
2546 if (reg & IDR0_HYP)
2547 smmu->features |= ARM_SMMU_FEAT_HYP;
2548
2549 /*
Lorenzo Pieralisi2985b522016-11-21 10:01:42 +00002550 * The coherency feature as set by FW is used in preference to the ID
Will Deacon48ec83b2015-05-27 17:25:59 +01002551 * register, but warn on mismatch.
2552 */
Will Deacon48ec83b2015-05-27 17:25:59 +01002553 if (!!(reg & IDR0_COHACC) != coherent)
Robin Murphy2a22baa2017-09-25 14:55:40 +01002554 dev_warn(smmu->dev, "IDR0.COHACC overridden by FW configuration (%s)\n",
Will Deacon48ec83b2015-05-27 17:25:59 +01002555 coherent ? "true" : "false");
2556
Prem Mallappa6380be02015-12-14 22:01:23 +05302557 switch (reg & IDR0_STALL_MODEL_MASK << IDR0_STALL_MODEL_SHIFT) {
Prem Mallappa6380be02015-12-14 22:01:23 +05302558 case IDR0_STALL_MODEL_FORCE:
Yisheng Xie9cff86fd22017-09-21 20:36:07 +08002559 smmu->features |= ARM_SMMU_FEAT_STALL_FORCE;
2560 /* Fallthrough */
2561 case IDR0_STALL_MODEL_STALL:
Will Deacon48ec83b2015-05-27 17:25:59 +01002562 smmu->features |= ARM_SMMU_FEAT_STALLS;
Prem Mallappa6380be02015-12-14 22:01:23 +05302563 }
Will Deacon48ec83b2015-05-27 17:25:59 +01002564
2565 if (reg & IDR0_S1P)
2566 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
2567
2568 if (reg & IDR0_S2P)
2569 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
2570
2571 if (!(reg & (IDR0_S1P | IDR0_S2P))) {
2572 dev_err(smmu->dev, "no translation support!\n");
2573 return -ENXIO;
2574 }
2575
2576 /* We only support the AArch64 table format at present */
Will Deaconf0c453d2015-08-20 12:12:32 +01002577 switch (reg & IDR0_TTF_MASK << IDR0_TTF_SHIFT) {
2578 case IDR0_TTF_AARCH32_64:
2579 smmu->ias = 40;
2580 /* Fallthrough */
2581 case IDR0_TTF_AARCH64:
2582 break;
2583 default:
Will Deacon48ec83b2015-05-27 17:25:59 +01002584 dev_err(smmu->dev, "AArch64 table format not supported!\n");
2585 return -ENXIO;
2586 }
2587
2588 /* ASID/VMID sizes */
2589 smmu->asid_bits = reg & IDR0_ASID16 ? 16 : 8;
2590 smmu->vmid_bits = reg & IDR0_VMID16 ? 16 : 8;
2591
2592 /* IDR1 */
2593 reg = readl_relaxed(smmu->base + ARM_SMMU_IDR1);
2594 if (reg & (IDR1_TABLES_PRESET | IDR1_QUEUES_PRESET | IDR1_REL)) {
2595 dev_err(smmu->dev, "embedded implementation not supported\n");
2596 return -ENXIO;
2597 }
2598
2599 /* Queue sizes, capped at 4k */
2600 smmu->cmdq.q.max_n_shift = min((u32)CMDQ_MAX_SZ_SHIFT,
2601 reg >> IDR1_CMDQ_SHIFT & IDR1_CMDQ_MASK);
2602 if (!smmu->cmdq.q.max_n_shift) {
2603 /* Odd alignment restrictions on the base, so ignore for now */
2604 dev_err(smmu->dev, "unit-length command queue not supported\n");
2605 return -ENXIO;
2606 }
2607
2608 smmu->evtq.q.max_n_shift = min((u32)EVTQ_MAX_SZ_SHIFT,
2609 reg >> IDR1_EVTQ_SHIFT & IDR1_EVTQ_MASK);
2610 smmu->priq.q.max_n_shift = min((u32)PRIQ_MAX_SZ_SHIFT,
2611 reg >> IDR1_PRIQ_SHIFT & IDR1_PRIQ_MASK);
2612
2613 /* SID/SSID sizes */
2614 smmu->ssid_bits = reg >> IDR1_SSID_SHIFT & IDR1_SSID_MASK;
2615 smmu->sid_bits = reg >> IDR1_SID_SHIFT & IDR1_SID_MASK;
2616
Nate Watterson692c4e42017-01-10 14:47:13 -05002617 /*
2618 * If the SMMU supports fewer bits than would fill a single L2 stream
2619 * table, use a linear table instead.
2620 */
2621 if (smmu->sid_bits <= STRTAB_SPLIT)
2622 smmu->features &= ~ARM_SMMU_FEAT_2_LVL_STRTAB;
2623
Will Deacon48ec83b2015-05-27 17:25:59 +01002624 /* IDR5 */
2625 reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);
2626
2627 /* Maximum number of outstanding stalls */
2628 smmu->evtq.max_stalls = reg >> IDR5_STALL_MAX_SHIFT
2629 & IDR5_STALL_MAX_MASK;
2630
2631 /* Page sizes */
2632 if (reg & IDR5_GRAN64K)
Robin Murphyd5466352016-05-09 17:20:09 +01002633 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Will Deacon48ec83b2015-05-27 17:25:59 +01002634 if (reg & IDR5_GRAN16K)
Robin Murphyd5466352016-05-09 17:20:09 +01002635 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Will Deacon48ec83b2015-05-27 17:25:59 +01002636 if (reg & IDR5_GRAN4K)
Robin Murphyd5466352016-05-09 17:20:09 +01002637 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Will Deacon48ec83b2015-05-27 17:25:59 +01002638
Robin Murphyd5466352016-05-09 17:20:09 +01002639 if (arm_smmu_ops.pgsize_bitmap == -1UL)
2640 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
2641 else
2642 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
Will Deacon48ec83b2015-05-27 17:25:59 +01002643
2644 /* Output address size */
2645 switch (reg & IDR5_OAS_MASK << IDR5_OAS_SHIFT) {
2646 case IDR5_OAS_32_BIT:
2647 smmu->oas = 32;
2648 break;
2649 case IDR5_OAS_36_BIT:
2650 smmu->oas = 36;
2651 break;
2652 case IDR5_OAS_40_BIT:
2653 smmu->oas = 40;
2654 break;
2655 case IDR5_OAS_42_BIT:
2656 smmu->oas = 42;
2657 break;
2658 case IDR5_OAS_44_BIT:
2659 smmu->oas = 44;
2660 break;
Will Deacon85430962015-08-03 10:35:40 +01002661 default:
2662 dev_info(smmu->dev,
2663 "unknown output address size. Truncating to 48-bit\n");
2664 /* Fallthrough */
Will Deacon48ec83b2015-05-27 17:25:59 +01002665 case IDR5_OAS_48_BIT:
2666 smmu->oas = 48;
Will Deacon48ec83b2015-05-27 17:25:59 +01002667 }
2668
2669 /* Set the DMA mask for our table walker */
2670 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(smmu->oas)))
2671 dev_warn(smmu->dev,
2672 "failed to set DMA mask for table walker\n");
2673
Will Deaconf0c453d2015-08-20 12:12:32 +01002674 smmu->ias = max(smmu->ias, smmu->oas);
Will Deacon48ec83b2015-05-27 17:25:59 +01002675
2676 dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
2677 smmu->ias, smmu->oas, smmu->features);
2678 return 0;
2679}
2680
Lorenzo Pieralisie4dadfa2016-11-21 10:01:43 +00002681#ifdef CONFIG_ACPI
Linu Cheriane5b829d2017-06-22 17:35:37 +05302682static void acpi_smmu_get_options(u32 model, struct arm_smmu_device *smmu)
2683{
shameer99caf172017-05-17 10:12:05 +01002684 switch (model) {
2685 case ACPI_IORT_SMMU_V3_CAVIUM_CN99XX:
Linu Cheriane5b829d2017-06-22 17:35:37 +05302686 smmu->options |= ARM_SMMU_OPT_PAGE0_REGS_ONLY;
shameer99caf172017-05-17 10:12:05 +01002687 break;
Robin Murphy6948d4a2017-09-22 15:04:00 +01002688 case ACPI_IORT_SMMU_V3_HISILICON_HI161X:
shameer99caf172017-05-17 10:12:05 +01002689 smmu->options |= ARM_SMMU_OPT_SKIP_PREFETCH;
2690 break;
2691 }
Linu Cheriane5b829d2017-06-22 17:35:37 +05302692
2693 dev_notice(smmu->dev, "option mask 0x%x\n", smmu->options);
2694}
2695
Lorenzo Pieralisie4dadfa2016-11-21 10:01:43 +00002696static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
2697 struct arm_smmu_device *smmu)
2698{
2699 struct acpi_iort_smmu_v3 *iort_smmu;
2700 struct device *dev = smmu->dev;
2701 struct acpi_iort_node *node;
2702
2703 node = *(struct acpi_iort_node **)dev_get_platdata(dev);
2704
2705 /* Retrieve SMMUv3 specific data */
2706 iort_smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
2707
Linu Cheriane5b829d2017-06-22 17:35:37 +05302708 acpi_smmu_get_options(iort_smmu->model, smmu);
2709
Lorenzo Pieralisie4dadfa2016-11-21 10:01:43 +00002710 if (iort_smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE)
2711 smmu->features |= ARM_SMMU_FEAT_COHERENCY;
2712
2713 return 0;
2714}
2715#else
2716static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
2717 struct arm_smmu_device *smmu)
2718{
2719 return -ENODEV;
2720}
2721#endif
2722
Lorenzo Pieralisi2985b522016-11-21 10:01:42 +00002723static int arm_smmu_device_dt_probe(struct platform_device *pdev,
2724 struct arm_smmu_device *smmu)
Will Deacon48ec83b2015-05-27 17:25:59 +01002725{
Will Deacon48ec83b2015-05-27 17:25:59 +01002726 struct device *dev = &pdev->dev;
Robin Murphydc87a982016-09-12 17:13:44 +01002727 u32 cells;
Lorenzo Pieralisi2985b522016-11-21 10:01:42 +00002728 int ret = -EINVAL;
Robin Murphydc87a982016-09-12 17:13:44 +01002729
2730 if (of_property_read_u32(dev->of_node, "#iommu-cells", &cells))
2731 dev_err(dev, "missing #iommu-cells property\n");
2732 else if (cells != 1)
2733 dev_err(dev, "invalid #iommu-cells value (%d)\n", cells);
2734 else
Lorenzo Pieralisi2985b522016-11-21 10:01:42 +00002735 ret = 0;
2736
2737 parse_driver_options(smmu);
2738
2739 if (of_dma_is_coherent(dev->of_node))
2740 smmu->features |= ARM_SMMU_FEAT_COHERENCY;
2741
2742 return ret;
2743}
2744
Linu Cheriane5b829d2017-06-22 17:35:37 +05302745static unsigned long arm_smmu_resource_size(struct arm_smmu_device *smmu)
2746{
2747 if (smmu->options & ARM_SMMU_OPT_PAGE0_REGS_ONLY)
2748 return SZ_64K;
2749 else
2750 return SZ_128K;
2751}
2752
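/*
 * Common probe path for both DT and ACPI; firmware probing determines
 * the options and coherency, and whether to fall back to bypass mode.
 */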
Lorenzo Pieralisi2985b522016-11-21 10:01:42 +00002753static int arm_smmu_device_probe(struct platform_device *pdev)
2754{
2755 int irq, ret;
2756 struct resource *res;
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002757 resource_size_t ioaddr;
Lorenzo Pieralisi2985b522016-11-21 10:01:42 +00002758 struct arm_smmu_device *smmu;
2759 struct device *dev = &pdev->dev;
2760 bool bypass;
Will Deacon48ec83b2015-05-27 17:25:59 +01002761
2762 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
2763 if (!smmu) {
2764 dev_err(dev, "failed to allocate arm_smmu_device\n");
2765 return -ENOMEM;
2766 }
2767 smmu->dev = dev;
2768
Linu Cheriane5b829d2017-06-22 17:35:37 +05302769 if (dev->of_node) {
2770 ret = arm_smmu_device_dt_probe(pdev, smmu);
2771 } else {
2772 ret = arm_smmu_device_acpi_probe(pdev, smmu);
2773 if (ret == -ENODEV)
2774 return ret;
2775 }
2776
2777 /* Set bypass mode according to firmware probing result */
2778 bypass = !!ret;
2779
Will Deacon48ec83b2015-05-27 17:25:59 +01002780 /* Base address */
2781 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Linu Cheriane5b829d2017-06-22 17:35:37 +05302782 if (resource_size(res) + 1 < arm_smmu_resource_size(smmu)) {
Will Deacon48ec83b2015-05-27 17:25:59 +01002783 dev_err(dev, "MMIO region too small (%pr)\n", res);
2784 return -EINVAL;
2785 }
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002786 ioaddr = res->start;
Will Deacon48ec83b2015-05-27 17:25:59 +01002787
2788 smmu->base = devm_ioremap_resource(dev, res);
2789 if (IS_ERR(smmu->base))
2790 return PTR_ERR(smmu->base);
2791
2792 /* Interrupt lines */
Will Deacon48ec83b2015-05-27 17:25:59 +01002793
Geetha Sowjanyaf9354482017-06-23 19:04:36 +05302794 irq = platform_get_irq_byname(pdev, "combined");
Will Deacon48ec83b2015-05-27 17:25:59 +01002795 if (irq > 0)
Geetha Sowjanyaf9354482017-06-23 19:04:36 +05302796 smmu->combined_irq = irq;
2797 else {
2798 irq = platform_get_irq_byname(pdev, "eventq");
2799 if (irq > 0)
2800 smmu->evtq.q.irq = irq;
Will Deacon48ec83b2015-05-27 17:25:59 +01002801
Geetha Sowjanyaf9354482017-06-23 19:04:36 +05302802 irq = platform_get_irq_byname(pdev, "priq");
2803 if (irq > 0)
2804 smmu->priq.q.irq = irq;
Will Deacon48ec83b2015-05-27 17:25:59 +01002805
Geetha Sowjanyaf9354482017-06-23 19:04:36 +05302806 irq = platform_get_irq_byname(pdev, "cmdq-sync");
2807 if (irq > 0)
2808 smmu->cmdq.q.irq = irq;
Will Deacon48ec83b2015-05-27 17:25:59 +01002809
Geetha Sowjanyaf9354482017-06-23 19:04:36 +05302810 irq = platform_get_irq_byname(pdev, "gerror");
2811 if (irq > 0)
2812 smmu->gerr_irq = irq;
2813 }
Will Deacon48ec83b2015-05-27 17:25:59 +01002814 /* Probe the h/w */
Lorenzo Pieralisi2985b522016-11-21 10:01:42 +00002815 ret = arm_smmu_device_hw_probe(smmu);
Will Deacon48ec83b2015-05-27 17:25:59 +01002816 if (ret)
2817 return ret;
2818
2819 /* Initialise in-memory data structures */
2820 ret = arm_smmu_init_structures(smmu);
2821 if (ret)
2822 return ret;
2823
Marc Zyngier166bdbd2015-10-13 18:32:30 +01002824 /* Record our private device structure */
2825 platform_set_drvdata(pdev, smmu);
2826
Will Deacon48ec83b2015-05-27 17:25:59 +01002827 /* Reset the device */
Robin Murphy8f785152016-09-12 17:13:45 +01002828 ret = arm_smmu_device_reset(smmu, bypass);
2829 if (ret)
2830 return ret;
2831
2832 /* And we're up. Go go go! */
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002833 ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL,
2834 "smmu3.%pa", &ioaddr);
Robin Murphy08d4ca22016-09-12 17:13:46 +01002835 if (ret)
2836 return ret;
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002837
2838 iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
2839 iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
2840
2841 ret = iommu_device_register(&smmu->iommu);
Arvind Yadav5c2d0212017-06-22 12:57:42 +05302842 if (ret) {
2843 dev_err(dev, "Failed to register iommu\n");
2844 return ret;
2845 }
Lorenzo Pieralisi778de072016-11-21 10:01:38 +00002846
Robin Murphy8f785152016-09-12 17:13:45 +01002847#ifdef CONFIG_PCI
Robin Murphyec615f42016-11-03 17:39:07 +00002848 if (pci_bus_type.iommu_ops != &arm_smmu_ops) {
2849 pci_request_acs();
2850 ret = bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
2851 if (ret)
2852 return ret;
2853 }
Robin Murphy08d4ca22016-09-12 17:13:46 +01002854#endif
2855#ifdef CONFIG_ARM_AMBA
Robin Murphyec615f42016-11-03 17:39:07 +00002856 if (amba_bustype.iommu_ops != &arm_smmu_ops) {
2857 ret = bus_set_iommu(&amba_bustype, &arm_smmu_ops);
2858 if (ret)
2859 return ret;
2860 }
Robin Murphy08d4ca22016-09-12 17:13:46 +01002861#endif
Robin Murphyec615f42016-11-03 17:39:07 +00002862 if (platform_bus_type.iommu_ops != &arm_smmu_ops) {
2863 ret = bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
2864 if (ret)
2865 return ret;
2866 }
2867 return 0;
Will Deacon48ec83b2015-05-27 17:25:59 +01002868}
2869
2870static int arm_smmu_device_remove(struct platform_device *pdev)
2871{
Will Deacon941a8022015-08-11 16:25:10 +01002872 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
Will Deacon48ec83b2015-05-27 17:25:59 +01002873
2874 arm_smmu_device_disable(smmu);
Nate Watterson7aa86192017-06-29 18:18:15 -04002875
Will Deacon48ec83b2015-05-27 17:25:59 +01002876 return 0;
2877}
2878
Nate Watterson7aa86192017-06-29 18:18:15 -04002879static void arm_smmu_device_shutdown(struct platform_device *pdev)
2880{
2881 arm_smmu_device_remove(pdev);
2882}
2883
Arvind Yadavebdd13c2017-06-22 12:51:00 +05302884static const struct of_device_id arm_smmu_of_match[] = {
Will Deacon48ec83b2015-05-27 17:25:59 +01002885 { .compatible = "arm,smmu-v3", },
2886 { },
2887};
2888MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
2889
2890static struct platform_driver arm_smmu_driver = {
2891 .driver = {
2892 .name = "arm-smmu-v3",
2893 .of_match_table = of_match_ptr(arm_smmu_of_match),
2894 },
Lorenzo Pieralisi2985b522016-11-21 10:01:42 +00002895 .probe = arm_smmu_device_probe,
Will Deacon48ec83b2015-05-27 17:25:59 +01002896 .remove = arm_smmu_device_remove,
Nate Watterson7aa86192017-06-29 18:18:15 -04002897 .shutdown = arm_smmu_device_shutdown,
Will Deacon48ec83b2015-05-27 17:25:59 +01002898};
Robin Murphyf6810c12017-04-10 16:51:05 +05302899module_platform_driver(arm_smmu_driver);
Will Deacon48ec83b2015-05-27 17:25:59 +01002900
Robin Murphyf6810c12017-04-10 16:51:05 +05302901IOMMU_OF_DECLARE(arm_smmuv3, "arm,smmu-v3", NULL);
Lorenzo Pieralisie4dadfa2016-11-21 10:01:43 +00002902
Will Deacon48ec83b2015-05-27 17:25:59 +01002903MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");
2904MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
2905MODULE_LICENSE("GPL v2");