/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 *	- Extended Stream ID (16 bit)
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_EXIDENABLE			(1 << 3)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR		0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_PTFS_NO_AARCH32		(1 << 25)
#define ID0_PTFS_NO_AARCH32S		(1 << 24)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_EXIDS			(1 << 8)
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)

#define ID7_MAJOR_SHIFT			4
#define ID7_MAJOR_MASK			0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */
#define TLB_SPIN_COUNT			10

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_ID_SHIFT			0

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_EXIDVALID			(1 << 10)
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
enum arm_smmu_s2cr_type {
	S2CR_TYPE_TRANS,
	S2CR_TYPE_BYPASS,
	S2CR_TYPE_FAULT,
};

#define S2CR_PRIVCFG_SHIFT		24
#define S2CR_PRIVCFG_MASK		0x3
enum arm_smmu_s2cr_privcfg {
	S2CR_PRIVCFG_DEFAULT,
	S2CR_PRIVCFG_DIPAN,
	S2CR_PRIVCFG_UNPRIV,
	S2CR_PRIVCFG_PRIV,
};

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff

/* Translation context bank */
#define ARM_SMMU_CB(smmu, n)	((smmu)->cb_base + ((n) << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_ACTLR		0x4
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_CONTEXTIDR		0x34
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR			0x50
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FAR			0x60
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_TLBSYNC		0x7f0
#define ARM_SMMU_CB_TLBSTATUS		0x7f4
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)
#define ARM_MMU500_ACR_SMTNMB_TLBEN	(1 << 8)

#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)
#define TTBCR2_AS			(1 << 4)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

#define MSI_IOVA_BASE			0x8000000
#define MSI_IOVA_LENGTH			0x100000

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
};

/* Until ACPICA headers cover IORT rev. C */
#ifndef ACPI_IORT_SMMU_CORELINK_MMU401
#define ACPI_IORT_SMMU_CORELINK_MMU401	0x4
#endif
#ifndef ACPI_IORT_SMMU_CAVIUM_THUNDERX
#define ACPI_IORT_SMMU_CAVIUM_THUNDERX	0x5
#endif

struct arm_smmu_s2cr {
	struct iommu_group		*group;
	int				count;
	enum arm_smmu_s2cr_type		type;
	enum arm_smmu_s2cr_privcfg	privcfg;
	u8				cbndx;
};

#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
}
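
/*
 * Editorial note (not in the original source): s2cr_init_val is a C99
 * compound literal, so an unclaimed S2CR can be reset by plain
 * assignment (smmu->s2crs[idx] = s2cr_init_val;), routing unmatched
 * streams to fault or bypass according to the disable_bypass parameter.
 */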

struct arm_smmu_smr {
	u16				mask;
	u16				id;
	bool				valid;
};

struct arm_smmu_cb {
	u64				ttbr[2];
	u32				tcr[2];
	u32				mair[2];
	struct arm_smmu_cfg		*cfg;
};

struct arm_smmu_master_cfg {
	struct arm_smmu_device		*smmu;
	s16				smendx[];
};
#define INVALID_SMENDX			-1
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
#define fwspec_smendx(fw, i) \
	(i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
#define for_each_cfg_sme(fw, i, idx) \
	for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)
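
/*
 * Usage sketch (editorial, not part of the original source): walking a
 * master's stream map entries might look like
 *
 *	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
 *	int i, idx;
 *
 *	for_each_cfg_sme(fwspec, i, idx) {
 *		if (idx == INVALID_SMENDX)
 *			continue;	// not yet allocated
 *		arm_smmu_write_sme(fwspec_smmu(fwspec), idx);
 *	}
 */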

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	void __iomem			*cb_base;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
#define ARM_SMMU_FEAT_EXIDS		(1 << 12)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	struct arm_smmu_cb		*cbs;
	atomic_t			irptndx;

	u32				num_mapping_groups;
	u16				streamid_mask;
	u16				smr_mask_mask;
	struct arm_smmu_smr		*smrs;
	struct arm_smmu_s2cr		*s2crs;
	struct mutex			stream_map_mutex;

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	u32				cavium_id_base; /* Specific to Cavium */

	spinlock_t			global_sync_lock;

	/* IOMMU core code handle */
	struct iommu_device		iommu;
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	union {
		u16			asid;
		u16			vmid;
	};
	u32				cbar;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
	ARM_SMMU_DOMAIN_BYPASS,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	spinlock_t			cb_lock; /* Serialises ATS1* ops and TLB syncs */
	struct iommu_domain		domain;
};

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static bool using_legacy_binding, using_generic_binding;

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}
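
/*
 * Illustrative DT fragment (editorial, not from the original source):
 * the single option above corresponds to a boolean property on the
 * SMMU node, e.g.
 *
 *	smmu@b0000000 {			// address is hypothetical
 *		compatible = "arm,smmu-v1";
 *		calxeda,smmu-secure-config-access;
 *		...
 *	};
 */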

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}

static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", 0)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}
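
/*
 * Editorial note (not in the original source): returning 1 above makes
 * driver_for_each_device() stop early, so the caller can distinguish a
 * match (1, with *data pointing at the SMMU device), no match after a
 * full walk (0), and a real error from the phandle iterator (<0).
 */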

static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;

static int arm_smmu_register_legacy_master(struct device *dev,
					   struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err;

	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}

static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}
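
/*
 * Editorial note (not in the original source): the find/test-and-set
 * loop above allocates without holding a lock - if two callers race
 * for the same zero bit, test_and_set_bit() succeeds for exactly one
 * of them and the loser simply searches again from 'start'.
 */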

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
				void __iomem *sync, void __iomem *status)
{
	unsigned int spin_cnt, delay;

	writel_relaxed(0, sync);
	for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
		for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
			if (!(readl_relaxed(status) & sTLBGSTATUS_GSACTIVE))
				return;
			cpu_relax();
		}
		udelay(delay);
	}
	dev_err_ratelimited(smmu->dev,
			    "TLB sync timed out -- SMMU may be deadlocked\n");
}
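
/*
 * Editorial note (not in the original source): the poll above spins
 * TLB_SPIN_COUNT times for the common fast case, then backs off with
 * exponentially growing udelay()s (1us, 2us, 4us, ...) so that a slow
 * sync doesn't hammer the bus, giving up after on the order of
 * TLB_LOOP_TIMEOUT microseconds.
 */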

static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
{
	void __iomem *base = ARM_SMMU_GR0(smmu);
	unsigned long flags;

	spin_lock_irqsave(&smmu->global_sync_lock, flags);
	__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_GR0_sTLBGSYNC,
			    base + ARM_SMMU_GR0_sTLBGSTATUS);
	spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
}

static void arm_smmu_tlb_sync_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
	unsigned long flags;

	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
	__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_CB_TLBSYNC,
			    base + ARM_SMMU_CB_TLBSTATUS);
	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
}

static void arm_smmu_tlb_sync_vmid(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;

	arm_smmu_tlb_sync_global(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context_s1(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *base = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);

	writel_relaxed(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
	arm_smmu_tlb_sync_context(cookie);
}

static void arm_smmu_tlb_inv_context_s2(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *base = ARM_SMMU_GR0(smmu);

	writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
	arm_smmu_tlb_sync_global(smmu);
}

static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);

	if (stage1) {
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			iova &= ~12UL;
			iova |= cfg->asid;
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)cfg->asid << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else {
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	}
}
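
/*
 * Editorial note (not in the original source): the encodings above
 * mirror the TLBI register layouts - the 32-bit stage-1 form carries
 * VA[31:12] with the ASID in the low bits, the 64-bit form carries
 * VA >> 12 with the ASID in bits [63:48], and the stage-2 IPA forms
 * take IPA >> 12 with no ASID.
 */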

/*
 * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
 * almost negligible, but the benefit of getting the first one in as far ahead
 * of the sync as possible is significant, hence we don't just make this a
 * no-op and set .tlb_sync to arm_smmu_tlb_inv_context_s2() as you might think.
 */
static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
					 size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	void __iomem *base = ARM_SMMU_GR0(smmu_domain->smmu);

	writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
}

static const struct iommu_gather_ops arm_smmu_s1_tlb_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s1,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync_context,
};

static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v2 = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync_context,
};

static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v1 = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
	.tlb_add_flush	= arm_smmu_tlb_inv_vmid_nosync,
	.tlb_sync	= arm_smmu_tlb_sync_vmid,
};
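
/*
 * Editorial summary (not in the original source): stage-1 domains
 * always use per-context invalidation and sync; stage-2 domains do too
 * on SMMUv2, which has a per-context-bank TLBSYNC, but must fall back
 * to TLBIVMID plus a global sync on SMMUv1.
 */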

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	u32 fsr, fsynr;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);

	dev_err_ratelimited(smmu->dev,
	"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n",
			    fsr, iova, fsynr, cfg->cbndx);

	writel(fsr, cb_base + ARM_SMMU_CB_FSR);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	cb->cfg = cfg;

	/* TTBCR */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr;
		} else {
			cb->tcr[0] = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
			cb->tcr[1] = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			cb->tcr[1] |= TTBCR2_SEP_UPSTREAM;
			if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
				cb->tcr[1] |= TTBCR2_AS;
		}
	} else {
		cb->tcr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	}

	/* TTBRs */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
			cb->ttbr[1] = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
		} else {
			cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
			cb->ttbr[0] |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
			cb->ttbr[1] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
			cb->ttbr[1] |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
		}
	} else {
		cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr;
			cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr;
		} else {
			cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
			cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		}
	}
}
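
/*
 * Editorial note (not in the original source): register values are
 * staged in the shadow struct arm_smmu_cb by the function above and
 * only pushed to hardware by arm_smmu_write_context_bank() below, so a
 * context bank can be reprogrammed from the shadow copy alone, e.g.
 * when the SMMU is reset.
 */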

static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
{
	u32 reg;
	bool stage1;
	struct arm_smmu_cb *cb = &smmu->cbs[idx];
	struct arm_smmu_cfg *cfg = cb->cfg;
	void __iomem *cb_base, *gr1_base;

	cb_base = ARM_SMMU_CB(smmu, idx);

	/* Unassigned context banks only need disabling */
	if (!cfg) {
		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
		return;
	}

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	/* CBA2R */
	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_RW64_64BIT;
		else
			reg = CBA2R_RW64_32BIT;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= cfg->vmid << CBA2R_VMID_SHIFT;

		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(idx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version < ARM_SMMU_V2)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= cfg->vmid << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(idx));

	/*
	 * TTBCR
	 * We must write this before the TTBRs, since it determines the
	 * access behaviour of some fields (in particular, ASID[15:8]).
	 */
	if (stage1 && smmu->version > ARM_SMMU_V1)
		writel_relaxed(cb->tcr[1], cb_base + ARM_SMMU_CB_TTBCR2);
	writel_relaxed(cb->tcr[0], cb_base + ARM_SMMU_CB_TTBCR);

	/* TTBRs */
	if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
		writel_relaxed(cfg->asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
		writel_relaxed(cb->ttbr[0], cb_base + ARM_SMMU_CB_TTBR0);
		writel_relaxed(cb->ttbr[1], cb_base + ARM_SMMU_CB_TTBR1);
	} else {
		writeq_relaxed(cb->ttbr[0], cb_base + ARM_SMMU_CB_TTBR0);
		if (stage1)
			writeq_relaxed(cb->ttbr[1], cb_base + ARM_SMMU_CB_TTBR1);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		writel_relaxed(cb->mair[0], cb_base + ARM_SMMU_CB_S1_MAIR0);
		writel_relaxed(cb->mair[1], cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= SCTLR_E;

	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	const struct iommu_gather_ops *tlb_ops;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
		smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N              S1
	 *     S1             S1+S2            S1
	 *     S1               S2             S2
	 *     S1               S1             S1
	 *     N                N              N
	 *     N              S1+S2            S2
	 *     N                S2             S2
	 *     N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
	    !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
	    (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		} else {
			fmt = ARM_V7S;
			ias = min(ias, 32UL);
			oas = min(oas, 32UL);
		}
		tlb_ops = &arm_smmu_s1_tlb_ops;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		if (smmu->version == ARM_SMMU_V2)
			tlb_ops = &arm_smmu_s2_tlb_ops_v2;
		else
			tlb_ops = &arm_smmu_s2_tlb_ops_v1;
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}
	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (ret < 0)
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
		cfg->vmid = cfg->cbndx + 1 + smmu->cavium_id_base;
	else
		cfg->asid = cfg->cbndx + smmu->cavium_id_base;

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= tlb_ops,
		.iommu_dev	= smmu->dev,
	};

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA;

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1UL << ias) - 1;
	domain->geometry.force_aperture = true;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
			       IRQF_SHARED, "arm-smmu-context-fault", domain);
	if (ret < 0) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	int irq;

	if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	smmu->cbs[cfg->cbndx].cfg = NULL;
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED &&
	    type != IOMMU_DOMAIN_DMA &&
	    type != IOMMU_DOMAIN_IDENTITY)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
	    iommu_get_dma_cookie(&smmu_domain->domain))) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->cb_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_smr *smr = smmu->smrs + idx;
	u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;

	if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
		reg |= SMR_VALID;
	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
}

static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
	u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
		  (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
		  (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;

	if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
	    smmu->smrs[idx].valid)
		reg |= S2CR_EXIDVALID;
	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
}

static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
{
	arm_smmu_write_s2cr(smmu, idx);
	if (smmu->smrs)
		arm_smmu_write_smr(smmu, idx);
}
1205
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001206/*
1207 * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
1208 * should be called after sCR0 is written.
1209 */
1210static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
1211{
1212 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1213 u32 smr;
1214
1215 if (!smmu->smrs)
1216 return;
1217
1218 /*
1219 * SMR.ID bits may not be preserved if the corresponding MASK
1220 * bits are set, so check each one separately. We can reject
1221 * masters later if they try to claim IDs outside these masks.
1222 */
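	/*
	 * The probe works by writing the maximum field value and reading
	 * it back: bits the implementation does not provide should read
	 * back as zero, leaving only the usable ID/mask widths set.
	 */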
	smr = smmu->streamid_mask << SMR_ID_SHIFT;
	writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
	smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
	smmu->streamid_mask = smr >> SMR_ID_SHIFT;

	smr = smmu->streamid_mask << SMR_MASK_SHIFT;
	writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
	smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
	smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
}

static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
{
	struct arm_smmu_smr *smrs = smmu->smrs;
	int i, free_idx = -ENOSPC;

	/* Stream indexing is blissfully easy */
	if (!smrs)
		return id;

	/* Validating SMRs is... less so */
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		if (!smrs[i].valid) {
			/*
			 * Note the first free entry we come across, which
			 * we'll claim in the end if nothing else matches.
			 */
			if (free_idx < 0)
				free_idx = i;
			continue;
		}
		/*
		 * If the new entry is _entirely_ matched by an existing entry,
		 * then reuse that, with the guarantee that there also cannot
		 * be any subsequent conflicting entries. In normal use we'd
		 * expect simply identical entries for this case, but there's
		 * no harm in accommodating the generalisation.
		 */
		if ((mask & smrs[i].mask) == mask &&
		    !((id ^ smrs[i].id) & ~smrs[i].mask))
			return i;
		/*
		 * If the new entry has any other overlap with an existing one,
		 * though, then there always exists at least one stream ID
		 * which would cause a conflict, and we can't allow that risk.
		 */
		if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
			return -EINVAL;
	}

	return free_idx;
}

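/*
 * Drop a reference on stream map entry @idx; returns true once the last
 * reference is gone and the (now invalidated) entry needs writing back
 * to the hardware.
 */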
static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
{
	if (--smmu->s2crs[idx].count)
		return false;

	smmu->s2crs[idx] = s2cr_init_val;
	if (smmu->smrs)
		smmu->smrs[idx].valid = false;

	return true;
}

static int arm_smmu_master_alloc_smes(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
	struct arm_smmu_device *smmu = cfg->smmu;
	struct arm_smmu_smr *smrs = smmu->smrs;
	struct iommu_group *group;
	int i, idx, ret;

	mutex_lock(&smmu->stream_map_mutex);
	/* Figure out a viable stream map entry allocation */
	for_each_cfg_sme(fwspec, i, idx) {
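		/*
		 * Each fwspec ID packs the 16-bit stream ID in its low
		 * half and the SMR mask above SMR_MASK_SHIFT, as encoded
		 * by arm_smmu_of_xlate().
		 */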
		u16 sid = fwspec->ids[i];
		u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;

		if (idx != INVALID_SMENDX) {
			ret = -EEXIST;
			goto out_err;
		}

		ret = arm_smmu_find_sme(smmu, sid, mask);
		if (ret < 0)
			goto out_err;

		idx = ret;
		if (smrs && smmu->s2crs[idx].count == 0) {
			smrs[idx].id = sid;
			smrs[idx].mask = mask;
			smrs[idx].valid = true;
		}
		smmu->s2crs[idx].count++;
		cfg->smendx[i] = (s16)idx;
	}

	group = iommu_group_get_for_dev(dev);
	if (!group)
		group = ERR_PTR(-ENOMEM);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto out_err;
	}
	iommu_group_put(group);

	/* It worked! Now, poke the actual hardware */
	for_each_cfg_sme(fwspec, i, idx) {
		arm_smmu_write_sme(smmu, idx);
		smmu->s2crs[idx].group = group;
	}

	mutex_unlock(&smmu->stream_map_mutex);
	return 0;

out_err:
	while (i--) {
		arm_smmu_free_sme(smmu, cfg->smendx[i]);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
	return ret;
}

static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
{
	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
	int i, idx;

	mutex_lock(&smmu->stream_map_mutex);
	for_each_cfg_sme(fwspec, i, idx) {
		if (arm_smmu_free_sme(smmu, idx))
			arm_smmu_write_sme(smmu, idx);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
}

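/*
 * Point every stream map entry for this master at the domain's context
 * bank (or bypass), rewriting only the S2CRs that actually change.
 */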
static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct iommu_fwspec *fwspec)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s2cr *s2cr = smmu->s2crs;
	u8 cbndx = smmu_domain->cfg.cbndx;
	enum arm_smmu_s2cr_type type;
	int i, idx;

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
		type = S2CR_TYPE_BYPASS;
	else
		type = S2CR_TYPE_TRANS;

	for_each_cfg_sme(fwspec, i, idx) {
		if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
			continue;

		s2cr[idx].type = type;
		s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
		s2cr[idx].cbndx = cbndx;
		arm_smmu_write_s2cr(smmu, idx);
	}
	return 0;
}

static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret;
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_device *smmu;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (!fwspec || fwspec->ops != &arm_smmu_ops) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/*
	 * FIXME: The arch/arm DMA API code tries to attach devices to its own
	 * domains between of_xlate() and add_device() - we have no way to cope
	 * with that, so until ARM gets converted to rely on groups and default
	 * domains, just say no (but more politely than by dereferencing NULL).
	 * This should be at least a WARN_ON once that's sorted.
	 */
	if (!fwspec->iommu_priv)
		return -ENODEV;

	smmu = fwspec_smmu(fwspec);
	/* Ensure that the domain is finalised */
	ret = arm_smmu_init_domain_context(domain, smmu);
	if (ret < 0)
		return ret;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different SMMUs.
	 */
	if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
		return -EINVAL;
	}

	/* Looks ok, so add the device to the domain */
	return arm_smmu_domain_add_master(smmu_domain, fwspec);
}

static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	return ops->map(ops, iova, paddr, size, prot);
}

static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size)
{
	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;

	if (!ops)
		return 0;

	return ops->unmap(ops, iova, size);
}

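/*
 * Resolve an IOVA by asking the SMMU itself to perform the walk via the
 * context bank's ATS1PR/ATSR/PAR registers, falling back to a software
 * walk of the page tables if the hardware translation times out.
 */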
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
	struct device *dev = smmu->dev;
	void __iomem *cb_base;
	u32 tmp;
	u64 phys;
	unsigned long va, flags;

	cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);

	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
	/* ATS1 registers can only be written atomically */
	va = iova & ~0xfffUL;
	if (smmu->version == ARM_SMMU_V2)
		smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
	else /* Register is only 32-bit in v1 */
		writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);

	if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
				      !(tmp & ATSR_ACTIVE), 5, 50)) {
		spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
		dev_err(dev,
			"iova to phys timed out on %pad. Falling back to software table walk.\n",
			&iova);
		return ops->iova_to_phys(ops, iova);
	}

	phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
	if (phys & CB_PAR_F) {
		dev_err(dev, "translation fault!\n");
		dev_err(dev, "PAR = 0x%llx\n", phys);
		return 0;
	}

	return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
}

static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (domain->type == IOMMU_DOMAIN_IDENTITY)
		return iova;

	if (!ops)
		return 0;

	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
	    smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
		return arm_smmu_iova_to_phys_hard(domain, iova);

	return ops->iova_to_phys(ops, iova);
}

static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
		return true;
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static int arm_smmu_match_node(struct device *dev, void *data)
{
	return dev->fwnode == data;
}

static
struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
{
	struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
						fwnode, arm_smmu_match_node);
	put_device(dev);
	return dev ? dev_get_drvdata(dev) : NULL;
}

static int arm_smmu_add_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_cfg *cfg;
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	int i, ret;

	if (using_legacy_binding) {
		ret = arm_smmu_register_legacy_master(dev, &smmu);
		if (ret)
			goto out_free;
	} else if (fwspec && fwspec->ops == &arm_smmu_ops) {
		smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
	} else {
		return -ENODEV;
	}

	ret = -EINVAL;
	for (i = 0; i < fwspec->num_ids; i++) {
		u16 sid = fwspec->ids[i];
		u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;

		if (sid & ~smmu->streamid_mask) {
			dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
				sid, smmu->streamid_mask);
			goto out_free;
		}
		if (mask & ~smmu->smr_mask_mask) {
			dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
				mask, smmu->smr_mask_mask);
			goto out_free;
		}
	}

	ret = -ENOMEM;
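	/*
	 * i == fwspec->num_ids at this point, so size the config to hold
	 * exactly one smendx slot per stream ID.
	 */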
	cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
		      GFP_KERNEL);
	if (!cfg)
		goto out_free;

	cfg->smmu = smmu;
	fwspec->iommu_priv = cfg;
	while (i--)
		cfg->smendx[i] = INVALID_SMENDX;

	ret = arm_smmu_master_alloc_smes(dev);
	if (ret)
		goto out_cfg_free;

	iommu_device_link(&smmu->iommu, dev);

	return 0;

out_cfg_free:
	kfree(cfg);
out_free:
	iommu_fwspec_free(dev);
	return ret;
}

static void arm_smmu_remove_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_master_cfg *cfg;
	struct arm_smmu_device *smmu;

	if (!fwspec || fwspec->ops != &arm_smmu_ops)
		return;

	cfg = fwspec->iommu_priv;
	smmu = cfg->smmu;

	iommu_device_unlink(&smmu->iommu, dev);
	arm_smmu_master_free_smes(fwspec);
	iommu_group_remove_device(dev);
	kfree(fwspec->iommu_priv);
	iommu_fwspec_free(dev);
}

static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
	struct iommu_group *group = NULL;
	int i, idx;

	for_each_cfg_sme(fwspec, i, idx) {
		if (group && smmu->s2crs[idx].group &&
		    group != smmu->s2crs[idx].group)
			return ERR_PTR(-EINVAL);

		group = smmu->s2crs[idx].group;
	}

	if (group)
		return iommu_group_ref_get(group);

	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else
		group = generic_device_group(dev);

	return group;
}

static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	default:
		return -ENODEV;
	}
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

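/*
 * Encode the firmware-supplied stream ID (args[0]) and optional SMR mask
 * (args[1], or the "stream-match-mask" property) into a single fwspec ID,
 * with the mask in the upper 16 bits.
 */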
static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	u32 mask, fwid = 0;

	if (args->args_count > 0)
		fwid |= (u16)args->args[0];

	if (args->args_count > 1)
		fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
	else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
		fwid |= (u16)mask << SMR_MASK_SHIFT;

	return iommu_fwspec_add_ids(dev, &fwid, 1);
}

static void arm_smmu_get_resv_regions(struct device *dev,
				      struct list_head *head)
{
	struct iommu_resv_region *region;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

	region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
					 prot, IOMMU_RESV_SW_MSI);
	if (!region)
		return;

	list_add_tail(&region->list, head);

	iommu_dma_get_resv_regions(dev, head);
}

static void arm_smmu_put_resv_regions(struct device *dev,
				      struct list_head *head)
{
	struct iommu_resv_region *entry, *next;

	list_for_each_entry_safe(entry, next, head, list)
		kfree(entry);
}

static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.map_sg			= default_iommu_map_sg,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= arm_smmu_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.of_xlate		= arm_smmu_of_xlate,
	.get_resv_regions	= arm_smmu_get_resv_regions,
	.put_resv_regions	= arm_smmu_put_resv_regions,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};

static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	int i;
	u32 reg, major;

	/* clear global FSR */
	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);

	/*
	 * Reset stream mapping groups: Initial values mark all SMRn as
	 * invalid and all S2CRn as bypass unless overridden.
	 */
	for (i = 0; i < smmu->num_mapping_groups; ++i)
		arm_smmu_write_sme(smmu, i);

	if (smmu->model == ARM_MMU500) {
		/*
		 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
		 * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
		 * bit is only present in MMU-500r2 onwards.
		 */
		reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
		major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
		reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
		if (major >= 2)
			reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
		/*
		 * Allow unmatched Stream IDs to allocate bypass
		 * TLB entries for reduced latency.
		 */
		reg |= ARM_MMU500_ACR_SMTNMB_TLBEN;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
	}

	/* Make sure all context banks are disabled and clear CB_FSR */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		void __iomem *cb_base = ARM_SMMU_CB(smmu, i);

		arm_smmu_write_context_bank(smmu, i);
		writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
		/*
		 * Disable MMU-500's not-particularly-beneficial next-page
		 * prefetcher for the sake of errata #841119 and #826419.
		 */
		if (smmu->model == ARM_MMU500) {
			reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
			reg &= ~ARM_MMU500_ACTLR_CPRE;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
		}
	}

	/* Invalidate the TLB, just in case */
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);

	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);

	/* Enable fault reporting */
	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);

	/* Disable TLB broadcasting. */
	reg |= (sCR0_VMIDPNE | sCR0_PTM);

	/* Enable client access, handling unmatched streams as appropriate */
	reg &= ~sCR0_CLIENTPD;
	if (disable_bypass)
		reg |= sCR0_USFCFG;
	else
		reg &= ~sCR0_USFCFG;

	/* Disable forced broadcasting */
	reg &= ~sCR0_FB;

	/* Don't upgrade barriers */
	reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);

	if (smmu->features & ARM_SMMU_FEAT_VMID16)
		reg |= sCR0_VMID16EN;

	if (smmu->features & ARM_SMMU_FEAT_EXIDS)
		reg |= sCR0_EXIDENABLE;

	/* Push the button */
	arm_smmu_tlb_sync_global(smmu);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}

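/* Convert the address-size field encoding used by the ID registers into bits */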
static int arm_smmu_id_size_to_bits(int size)
{
	switch (size) {
	case 0:
		return 32;
	case 1:
		return 36;
	case 2:
		return 40;
	case 3:
		return 42;
	case 4:
		return 44;
	case 5:
	default:
		return 48;
	}
}

static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned long size;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 id;
	bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
	int i;

	dev_notice(smmu->dev, "probing hardware configuration...\n");
	dev_notice(smmu->dev, "SMMUv%d with:\n",
		   smmu->version == ARM_SMMU_V2 ? 2 : 1);

	/* ID0 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);

	/* Restrict available stages based on module parameter */
	if (force_stage == 1)
		id &= ~(ID0_S2TS | ID0_NTS);
	else if (force_stage == 2)
		id &= ~(ID0_S1TS | ID0_NTS);

	if (id & ID0_S1TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
		dev_notice(smmu->dev, "\tstage 1 translation\n");
	}

	if (id & ID0_S2TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
		dev_notice(smmu->dev, "\tstage 2 translation\n");
	}

	if (id & ID0_NTS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
		dev_notice(smmu->dev, "\tnested translation\n");
	}

	if (!(smmu->features &
	      (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
		dev_err(smmu->dev, "\tno translation support!\n");
		return -ENODEV;
	}

	if ((id & ID0_S1TS) &&
	    ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
		dev_notice(smmu->dev, "\taddress translation ops\n");
	}

	/*
	 * In order for DMA API calls to work properly, we must defer to what
	 * the FW says about coherency, regardless of what the hardware claims.
	 * Fortunately, this also opens up a workaround for systems where the
	 * ID register value has ended up configured incorrectly.
	 */
	cttw_reg = !!(id & ID0_CTTW);
	if (cttw_fw || cttw_reg)
		dev_notice(smmu->dev, "\t%scoherent table walk\n",
			   cttw_fw ? "" : "non-");
	if (cttw_fw != cttw_reg)
		dev_notice(smmu->dev,
			   "\t(IDR0.CTTW overridden by FW configuration)\n");

	/* Max. number of entries we have for stream matching/indexing */
	if (smmu->version == ARM_SMMU_V2 && id & ID0_EXIDS) {
		smmu->features |= ARM_SMMU_FEAT_EXIDS;
		size = 1 << 16;
	} else {
		size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
	}
	smmu->streamid_mask = size - 1;
	if (id & ID0_SMS) {
		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
		size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
		if (size == 0) {
			dev_err(smmu->dev,
				"stream-matching supported, but no SMRs present!\n");
			return -ENODEV;
		}

		/* Zero-initialised to mark as invalid */
		smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
					  GFP_KERNEL);
		if (!smmu->smrs)
			return -ENOMEM;

		dev_notice(smmu->dev,
			   "\tstream matching with %lu register groups\n", size);
	}
	/* s2cr->type == 0 means translation, so initialise explicitly */
	smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
					 GFP_KERNEL);
	if (!smmu->s2crs)
		return -ENOMEM;
	for (i = 0; i < size; i++)
		smmu->s2crs[i] = s2cr_init_val;

	smmu->num_mapping_groups = size;
	mutex_init(&smmu->stream_map_mutex);
	spin_lock_init(&smmu->global_sync_lock);

	if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
		smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
		if (!(id & ID0_PTFS_NO_AARCH32S))
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
	}

	/* ID1 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
	smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;

	/* Check for size mismatch of SMMU address space from mapped region */
	size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
	size <<= smmu->pgshift;
	if (smmu->cb_base != gr0_base + size)
		dev_warn(smmu->dev,
			 "SMMU address space size (0x%lx) differs from mapped region size (0x%tx)!\n",
			 size * 2, (smmu->cb_base - gr0_base) * 2);

	smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
	smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
		return -ENODEV;
	}
	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
		   smmu->num_context_banks, smmu->num_s2_context_banks);
	/*
	 * Cavium CN88xx erratum #27704.
	 * Ensure ASID and VMID allocation is unique across all SMMUs in
	 * the system.
	 */
	if (smmu->model == CAVIUM_SMMUV2) {
		smmu->cavium_id_base =
			atomic_add_return(smmu->num_context_banks,
					  &cavium_smmu_context_count);
		smmu->cavium_id_base -= smmu->num_context_banks;
		dev_notice(smmu->dev, "\tenabling workaround for Cavium erratum 27704\n");
	}
	smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks,
				 sizeof(*smmu->cbs), GFP_KERNEL);
	if (!smmu->cbs)
		return -ENOMEM;

	/* ID2 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
	size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
	smmu->ipa_size = size;

	/* The output mask is also applied for bypass */
	size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
	smmu->pa_size = size;

	if (id & ID2_VMID16)
		smmu->features |= ARM_SMMU_FEAT_VMID16;

	/*
	 * What the page table walker can address actually depends on which
	 * descriptor format is in use, but since a) we don't know that yet,
	 * and b) it can vary per context bank, this will have to do...
	 */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	if (smmu->version < ARM_SMMU_V2) {
		smmu->va_size = smmu->ipa_size;
		if (smmu->version == ARM_SMMU_V1_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	} else {
		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
		smmu->va_size = arm_smmu_id_size_to_bits(size);
		if (id & ID2_PTFS_4K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
		if (id & ID2_PTFS_16K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
		if (id & ID2_PTFS_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	}

	/* Now we've corralled the various formats, what'll it do? */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
		smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
	if (smmu->features &
	    (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;

	if (arm_smmu_ops.pgsize_bitmap == -1UL)
		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
	else
		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
		   smmu->pgsize_bitmap);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
			   smmu->va_size, smmu->ipa_size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
		dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
			   smmu->ipa_size, smmu->pa_size);

	return 0;
}

struct arm_smmu_match_data {
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;
};

#define ARM_SMMU_MATCH_DATA(name, ver, imp)	\
static struct arm_smmu_match_data name = { .version = ver, .model = imp }

ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
	{ .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
	{ .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
	{ .compatible = "arm,mmu-401", .data = &arm_mmu401 },
	{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
	{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);

#ifdef CONFIG_ACPI
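/* Map the IORT node's model field onto the driver's version/model pair */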
static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
{
	int ret = 0;

	switch (model) {
	case ACPI_IORT_SMMU_V1:
	case ACPI_IORT_SMMU_CORELINK_MMU400:
		smmu->version = ARM_SMMU_V1;
		smmu->model = GENERIC_SMMU;
		break;
	case ACPI_IORT_SMMU_CORELINK_MMU401:
		smmu->version = ARM_SMMU_V1_64K;
		smmu->model = GENERIC_SMMU;
		break;
	case ACPI_IORT_SMMU_V2:
		smmu->version = ARM_SMMU_V2;
		smmu->model = GENERIC_SMMU;
		break;
	case ACPI_IORT_SMMU_CORELINK_MMU500:
		smmu->version = ARM_SMMU_V2;
		smmu->model = ARM_MMU500;
		break;
	case ACPI_IORT_SMMU_CAVIUM_THUNDERX:
		smmu->version = ARM_SMMU_V2;
		smmu->model = CAVIUM_SMMUV2;
		break;
	default:
		ret = -ENODEV;
	}

	return ret;
}

static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
				      struct arm_smmu_device *smmu)
{
	struct device *dev = smmu->dev;
	struct acpi_iort_node *node =
		*(struct acpi_iort_node **)dev_get_platdata(dev);
	struct acpi_iort_smmu *iort_smmu;
	int ret;

	/* Retrieve SMMU1/2 specific data */
	iort_smmu = (struct acpi_iort_smmu *)node->node_data;

	ret = acpi_smmu_get_data(iort_smmu->model, smmu);
	if (ret < 0)
		return ret;

	/* Ignore the configuration access interrupt */
	smmu->num_global_irqs = 1;

	if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;

	return 0;
}
#else
static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
					     struct arm_smmu_device *smmu)
{
	return -ENODEV;
}
#endif

static int arm_smmu_device_dt_probe(struct platform_device *pdev,
				    struct arm_smmu_device *smmu)
{
	const struct arm_smmu_match_data *data;
	struct device *dev = &pdev->dev;
	bool legacy_binding;

	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
		dev_err(dev, "missing #global-interrupts property\n");
		return -ENODEV;
	}

	data = of_device_get_match_data(dev);
	smmu->version = data->version;
	smmu->model = data->model;

	parse_driver_options(smmu);

	legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
	if (legacy_binding && !using_generic_binding) {
		if (!using_legacy_binding)
			pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
		using_legacy_binding = true;
	} else if (!legacy_binding && !using_legacy_binding) {
		using_generic_binding = true;
	} else {
		dev_err(dev, "not probing due to mismatched DT properties\n");
		return -ENODEV;
	}

	if (of_dma_is_coherent(dev->of_node))
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;

	return 0;
}

static void arm_smmu_bus_init(void)
{
	/* Oh, for a proper bus abstraction */
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
#ifdef CONFIG_ARM_AMBA
	if (!iommu_present(&amba_bustype))
		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
#endif
#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type)) {
		pci_request_acs();
		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
	}
#endif
}

static int arm_smmu_device_probe(struct platform_device *pdev)
{
	struct resource *res;
	resource_size_t ioaddr;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	int num_irqs, i, err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	if (dev->of_node)
		err = arm_smmu_device_dt_probe(pdev, smmu);
	else
		err = arm_smmu_device_acpi_probe(pdev, smmu);

	if (err)
		return err;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ioaddr = res->start;
	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);
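	/* Context banks occupy the upper half of the SMMU's address space */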
	smmu->cb_base = smmu->base + resource_size(res) / 2;

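	/*
	 * Count IRQ resources: the first num_global_irqs entries are the
	 * SMMU's global interrupts; anything beyond them is counted as a
	 * context-bank interrupt.
	 */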
	num_irqs = 0;
	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
		num_irqs++;
		if (num_irqs > smmu->num_global_irqs)
			smmu->num_context_irqs++;
	}

	if (!smmu->num_context_irqs) {
		dev_err(dev, "found %d interrupts but expected at least %d\n",
			num_irqs, smmu->num_global_irqs + 1);
		return -ENODEV;
	}

	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
				  GFP_KERNEL);
	if (!smmu->irqs) {
		dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
		return -ENOMEM;
	}

	for (i = 0; i < num_irqs; ++i) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0) {
			dev_err(dev, "failed to get irq index %d\n", i);
			return -ENODEV;
		}
		smmu->irqs[i] = irq;
	}

	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		return err;

	if (smmu->version == ARM_SMMU_V2 &&
	    smmu->num_context_banks != smmu->num_context_irqs) {
		dev_err(dev,
			"found only %d context interrupt(s) but %d required\n",
			smmu->num_context_irqs, smmu->num_context_banks);
		return -ENODEV;
	}

	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = devm_request_irq(smmu->dev, smmu->irqs[i],
				       arm_smmu_global_fault,
				       IRQF_SHARED,
				       "arm-smmu global fault",
				       smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			return err;
		}
	}

	err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
				     "smmu.%pa", &ioaddr);
	if (err) {
		dev_err(dev, "Failed to register iommu in sysfs\n");
		return err;
	}

	iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
	iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);

	err = iommu_device_register(&smmu->iommu);
	if (err) {
		dev_err(dev, "Failed to register iommu\n");
		return err;
	}

	platform_set_drvdata(pdev, smmu);
	arm_smmu_device_reset(smmu);
	arm_smmu_test_smr_masks(smmu);

	/*
	 * For ACPI and generic DT bindings, an SMMU will be probed before
	 * any device which might need it, so we want the bus ops in place
	 * ready to handle default domain setup as soon as any SMMU exists.
	 */
	if (!using_legacy_binding)
		arm_smmu_bus_init();

	return 0;
}

/*
 * With the legacy DT binding in play, though, we have no guarantees about
 * probe order, but then we're also not doing default domains, so we can
 * delay setting bus ops until we're sure every possible SMMU is ready,
 * and that way ensure that no add_device() calls get missed.
 */
static int arm_smmu_legacy_bus_init(void)
{
	if (using_legacy_binding)
		arm_smmu_bus_init();
	return 0;
}
device_initcall_sync(arm_smmu_legacy_bus_init);

static int arm_smmu_device_remove(struct platform_device *pdev)
{
	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);

	if (!smmu)
		return -ENODEV;

	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_err(&pdev->dev, "removing device with active domains!\n");

	/* Turn the thing off */
	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
	return 0;
}

static void arm_smmu_device_shutdown(struct platform_device *pdev)
{
	arm_smmu_device_remove(pdev);
}

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_probe,
	.remove	= arm_smmu_device_remove,
	.shutdown = arm_smmu_device_shutdown,
};
module_platform_driver(arm_smmu_driver);

IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", NULL);
IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", NULL);
IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", NULL);
IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", NULL);
IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", NULL);
IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", NULL);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");