/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 *	- Extended Stream ID (16 bit)
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_EXIDENABLE			(1 << 3)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR		0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_PTFS_NO_AARCH32		(1 << 25)
#define ID0_PTFS_NO_AARCH32S		(1 << 24)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_EXIDS			(1 << 8)
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)

#define ID7_MAJOR_SHIFT			4
#define ID7_MAJOR_MASK			0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */
#define TLB_SPIN_COUNT			10
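/*
 * Polling budget for TLB syncs: __arm_smmu_tlb_sync() busy-polls the
 * status register TLB_SPIN_COUNT times per round, then backs off with
 * exponentially growing udelay()s until roughly TLB_LOOP_TIMEOUT
 * microseconds have elapsed in total.
 */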

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_ID_SHIFT			0

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_EXIDVALID			(1 << 10)
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
enum arm_smmu_s2cr_type {
	S2CR_TYPE_TRANS,
	S2CR_TYPE_BYPASS,
	S2CR_TYPE_FAULT,
};

#define S2CR_PRIVCFG_SHIFT		24
#define S2CR_PRIVCFG_MASK		0x3
enum arm_smmu_s2cr_privcfg {
	S2CR_PRIVCFG_DEFAULT,
	S2CR_PRIVCFG_DIPAN,
	S2CR_PRIVCFG_UNPRIV,
	S2CR_PRIVCFG_PRIV,
};

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff

/* Translation context bank */
#define ARM_SMMU_CB(smmu, n)	((smmu)->cb_base + ((n) << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_ACTLR		0x4
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_CONTEXTIDR		0x34
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR			0x50
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FAR			0x60
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_TLBSYNC		0x7f0
#define ARM_SMMU_CB_TLBSTATUS		0x7f4
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)
#define ARM_MMU500_ACR_SMTNMB_TLBEN	(1 << 8)

#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)
#define TTBCR2_AS			(1 << 4)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

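/*
 * Software-chosen IOVA window in which to map MSI doorbells; the driver
 * advertises this range to the IOMMU core as a reserved software MSI
 * region for devices behind the SMMU.
 */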
#define MSI_IOVA_BASE			0x8000000
#define MSI_IOVA_LENGTH			0x100000

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
};

struct arm_smmu_s2cr {
	struct iommu_group		*group;
	int				count;
	enum arm_smmu_s2cr_type		type;
	enum arm_smmu_s2cr_privcfg	privcfg;
	u8				cbndx;
};

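/*
 * Unused stream map entries are reset to this value, so transactions from
 * unattached devices either bypass translation or fault, according to the
 * disable_bypass parameter above.
 */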
#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
}

struct arm_smmu_smr {
	u16				mask;
	u16				id;
	bool				valid;
};

struct arm_smmu_master_cfg {
	struct arm_smmu_device		*smmu;
	s16				smendx[];
};
#define INVALID_SMENDX			-1
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
#define fwspec_smendx(fw, i) \
	(i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
#define for_each_cfg_sme(fw, i, idx) \
	for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)
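/*
 * for_each_cfg_sme() iterates over a master's stream map entry indices.
 * Note the comma operator: idx is refreshed from smendx[] before each
 * i < fw->num_ids test, and fwspec_smendx()'s bounds check stops the
 * final (terminating) evaluation from reading past the end of the array.
 */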

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	void __iomem			*cb_base;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
#define ARM_SMMU_FEAT_EXIDS		(1 << 12)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	u16				streamid_mask;
	u16				smr_mask_mask;
	struct arm_smmu_smr		*smrs;
	struct arm_smmu_s2cr		*s2crs;
	struct mutex			stream_map_mutex;

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	u32				cavium_id_base; /* Specific to Cavium */

	/* IOMMU core code handle */
	struct iommu_device		iommu;
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	union {
		u16			asid;
		u16			vmid;
	};
	u32				cbar;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
	ARM_SMMU_DOMAIN_BYPASS,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	struct iommu_domain		domain;
};

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

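/*
 * On Cavium cn88xx-era implementations the on-chip SMMUs share a single
 * ASID/VMID namespace, so each SMMU instance claims a disjoint range of
 * context IDs (starting at its cavium_id_base) to keep TLB entries from
 * different SMMUs distinct.
 */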
static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static bool using_legacy_binding, using_generic_binding;

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}

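/*
 * Callback for driver_for_each_device(): scans one SMMU's legacy
 * "mmu-masters" phandle list for the master node stashed in the iterator.
 * *data points at the phandle iterator on entry and is overwritten with
 * the matching SMMU's struct device on success, which is how the result
 * escapes the iteration.
 */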
static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", 0)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}

static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;

static int arm_smmu_register_legacy_master(struct device *dev,
					   struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err;

	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}

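/*
 * Lock-free bitmap allocator (used for context banks):
 * find_next_zero_bit() proposes a candidate index, and the atomic
 * test_and_set_bit() retries the search if another CPU claimed that bit
 * in the meantime.
 */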
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
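/*
 * The write to the SYNC register kicks off the sync; the status register
 * is then polled with a short busy-wait first (syncs normally complete
 * quickly), backing off into exponentially growing udelay()s so that a
 * wedged SMMU is given up on after roughly a second.
 */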
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
				void __iomem *sync, void __iomem *status)
{
	unsigned int spin_cnt, delay;

	writel_relaxed(0, sync);
	for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
		for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
			if (!(readl_relaxed(status) & sTLBGSTATUS_GSACTIVE))
				return;
			cpu_relax();
		}
		udelay(delay);
	}
	dev_err_ratelimited(smmu->dev,
			    "TLB sync timed out -- SMMU may be deadlocked\n");
}

static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
{
	void __iomem *base = ARM_SMMU_GR0(smmu);

	__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_GR0_sTLBGSYNC,
			    base + ARM_SMMU_GR0_sTLBGSTATUS);
}

static void arm_smmu_tlb_sync_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);

	__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_CB_TLBSYNC,
			    base + ARM_SMMU_CB_TLBSTATUS);
}

static void arm_smmu_tlb_sync_vmid(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;

	arm_smmu_tlb_sync_global(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context_s1(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *base = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);

	writel_relaxed(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
	arm_smmu_tlb_sync_context(cookie);
}

static void arm_smmu_tlb_inv_context_s2(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *base = ARM_SMMU_GR0(smmu);

	writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
	arm_smmu_tlb_sync_global(smmu);
}

static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);

	if (stage1) {
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			iova &= ~12UL;
			iova |= cfg->asid;
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)cfg->asid << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else {
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	}
}

/*
 * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
 * almost negligible, but the benefit of getting the first one in as far ahead
 * of the sync as possible is significant, hence we don't just make this a
 * no-op and set .tlb_sync to arm_smmu_inv_context_s2() as you might think.
 */
static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
					 size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	void __iomem *base = ARM_SMMU_GR0(smmu_domain->smmu);

	writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
}

static const struct iommu_gather_ops arm_smmu_s1_tlb_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s1,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync_context,
};

static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v2 = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync_context,
};

static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v1 = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
	.tlb_add_flush	= arm_smmu_tlb_inv_vmid_nosync,
	.tlb_sync	= arm_smmu_tlb_sync_vmid,
};

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	u32 fsr, fsynr;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);

	dev_err_ratelimited(smmu->dev,
	"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n",
			    fsr, iova, fsynr, cfg->cbndx);

	writel(fsr, cb_base + ARM_SMMU_CB_FSR);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

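/*
 * Program one context bank from a freshly built io-pgtable config. The
 * write order matters: TTBCR is written before the TTBRs, since it
 * determines how the ASID fields of those registers are interpreted, and
 * SCTLR comes last because setting SCTLR_M enables translation for the
 * bank.
 */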
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	u32 reg, reg2;
	u64 reg64;
	bool stage1;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base, *gr1_base;

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);

	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_RW64_64BIT;
		else
			reg = CBA2R_RW64_32BIT;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= cfg->vmid << CBA2R_VMID_SHIFT;

		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version < ARM_SMMU_V2)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= cfg->vmid << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

	/*
	 * TTBCR
	 * We must write this before the TTBRs, since it determines the
	 * access behaviour of some fields (in particular, ASID[15:8]).
	 */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			reg = pgtbl_cfg->arm_v7s_cfg.tcr;
			reg2 = 0;
		} else {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
			reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			reg2 |= TTBCR2_SEP_UPSTREAM;
			if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
				reg2 |= TTBCR2_AS;
		}
		if (smmu->version > ARM_SMMU_V1)
			writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	}
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);

	/* TTBRs */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
			reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
			writel_relaxed(cfg->asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
		} else {
			reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
			reg64 |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
			writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
			reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
			reg64 |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
			writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
		}
	} else {
		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			reg = pgtbl_cfg->arm_v7s_cfg.prrr;
			reg2 = pgtbl_cfg->arm_v7s_cfg.nmrr;
		} else {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
			reg2 = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		}
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
		writel_relaxed(reg2, cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	const struct iommu_gather_ops *tlb_ops;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
		smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N              S1
	 *     S1             S1+S2            S1
	 *     S1               S2             S2
	 *     S1               S1             S1
	 *     N                N              N
	 *     N              S1+S2            S2
	 *     N                S2             S2
	 *     N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
	    !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
	    (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		} else {
			fmt = ARM_V7S;
			ias = min(ias, 32UL);
			oas = min(oas, 32UL);
		}
		tlb_ops = &arm_smmu_s1_tlb_ops;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		if (smmu->version == ARM_SMMU_V2)
			tlb_ops = &arm_smmu_s2_tlb_ops_v2;
		else
			tlb_ops = &arm_smmu_s2_tlb_ops_v1;
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}
	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (ret < 0)
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
		cfg->vmid = cfg->cbndx + 1 + smmu->cavium_id_base;
	else
		cfg->asid = cfg->cbndx + smmu->cavium_id_base;

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= tlb_ops,
		.iommu_dev	= smmu->dev,
	};

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1UL << ias) - 1;
	domain->geometry.force_aperture = true;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
			       IRQF_SHARED, "arm-smmu-context-fault", domain);
	if (ret < 0) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;
	int irq;

	if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED &&
	    type != IOMMU_DOMAIN_DMA &&
	    type != IOMMU_DOMAIN_IDENTITY)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
	    iommu_get_dma_cookie(&smmu_domain->domain))) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_smr *smr = smmu->smrs + idx;
	u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;

	if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
		reg |= SMR_VALID;
	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
}

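/*
 * With the Extended Stream ID feature enabled, the SMR format has no
 * spare bit left for SMR_VALID, so validity is signalled through
 * S2CR.EXIDVALID instead: arm_smmu_write_smr() above skips SMR_VALID
 * when EXIDS is in use, and the S2CR write asserts it here.
 */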
Robin Murphy8e8b2032016-09-12 17:13:50 +01001133static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
1134{
1135 struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
1136 u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
1137 (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
1138 (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;
1139
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001140 if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
1141 smmu->smrs[idx].valid)
1142 reg |= S2CR_EXIDVALID;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001143 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
1144}
1145
1146static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
1147{
1148 arm_smmu_write_s2cr(smmu, idx);
1149 if (smmu->smrs)
1150 arm_smmu_write_smr(smmu, idx);
1151}
1152
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001153/*
1154 * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
1155 * should be called after sCR0 is written.
1156 */
1157static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
1158{
1159 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1160 u32 smr;
1161
1162 if (!smmu->smrs)
1163 return;
1164
1165 /*
1166 * SMR.ID bits may not be preserved if the corresponding MASK
1167 * bits are set, so check each one separately. We can reject
1168 * masters later if they try to claim IDs outside these masks.
1169 */
1170 smr = smmu->streamid_mask << SMR_ID_SHIFT;
1171 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
1172 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
1173 smmu->streamid_mask = smr >> SMR_ID_SHIFT;
1174
1175 smr = smmu->streamid_mask << SMR_MASK_SHIFT;
1176 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
1177 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
1178 smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
1179}
1180
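/*
 * Find a stream map entry to serve the given (id, mask) pair. Returns
 * the index of an existing entry that entirely covers it, otherwise the
 * first free index, -ENOSPC if the table is full, or -EINVAL if the new
 * entry would partially overlap an existing one. For example, with
 * {id 0x100, mask 0x0f} present, {id 0x104, mask 0x03} is fully
 * contained and reuses that index, whereas {id 0x100, mask 0x1f} also
 * covers IDs the existing entry doesn't, so it is rejected.
 */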
Robin Murphy588888a2016-09-12 17:13:54 +01001181static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001182{
1183 struct arm_smmu_smr *smrs = smmu->smrs;
Robin Murphy588888a2016-09-12 17:13:54 +01001184 int i, free_idx = -ENOSPC;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001185
Robin Murphy588888a2016-09-12 17:13:54 +01001186 /* Stream indexing is blissfully easy */
1187 if (!smrs)
1188 return id;
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001189
Robin Murphy588888a2016-09-12 17:13:54 +01001190 /* Validating SMRs is... less so */
1191 for (i = 0; i < smmu->num_mapping_groups; ++i) {
1192 if (!smrs[i].valid) {
1193 /*
1194 * Note the first free entry we come across, which
1195 * we'll claim in the end if nothing else matches.
1196 */
1197 if (free_idx < 0)
1198 free_idx = i;
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001199 continue;
1200 }
Robin Murphy588888a2016-09-12 17:13:54 +01001201 /*
1202 * If the new entry is _entirely_ matched by an existing entry,
1203 * then reuse that, with the guarantee that there also cannot
1204 * be any subsequent conflicting entries. In normal use we'd
1205 * expect simply identical entries for this case, but there's
1206 * no harm in accommodating the generalisation.
1207 */
1208 if ((mask & smrs[i].mask) == mask &&
1209 !((id ^ smrs[i].id) & ~smrs[i].mask))
1210 return i;
1211 /*
1212 * If the new entry has any other overlap with an existing one,
1213 * though, then there always exists at least one stream ID
1214 * which would cause a conflict, and we can't allow that risk.
1215 */
1216 if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
1217 return -EINVAL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001218 }
1219
Robin Murphy588888a2016-09-12 17:13:54 +01001220 return free_idx;
1221}
1222
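/*
 * Drop one reference on a stream map entry. Returns true when the last
 * user has gone and the shadow state has been reset, in which case the
 * caller must write the now-invalid entry back to the hardware.
 */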
1223static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
1224{
1225 if (--smmu->s2crs[idx].count)
1226 return false;
1227
1228 smmu->s2crs[idx] = s2cr_init_val;
1229 if (smmu->smrs)
1230 smmu->smrs[idx].valid = false;
1231
1232 return true;
1233}
1234
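/*
 * Claim a stream map entry for every ID in the master's fwspec, under
 * stream_map_mutex: each ID either reuses a compatible existing entry
 * or fills a free one, with the S2CR refcount tracking sharing. The
 * hardware is only poked once the IOMMU group has been resolved, and
 * any failure unwinds the partial allocation.
 */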
1235static int arm_smmu_master_alloc_smes(struct device *dev)
1236{
Robin Murphyadfec2e2016-09-12 17:13:55 +01001237 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
1238 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy588888a2016-09-12 17:13:54 +01001239 struct arm_smmu_device *smmu = cfg->smmu;
1240 struct arm_smmu_smr *smrs = smmu->smrs;
1241 struct iommu_group *group;
1242 int i, idx, ret;
1243
1244 mutex_lock(&smmu->stream_map_mutex);
1245 /* Figure out a viable stream map entry allocation */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001246 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy021bb842016-09-14 15:26:46 +01001247 u16 sid = fwspec->ids[i];
1248 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
1249
Robin Murphy588888a2016-09-12 17:13:54 +01001250 if (idx != INVALID_SMENDX) {
1251 ret = -EEXIST;
1252 goto out_err;
1253 }
1254
Robin Murphy021bb842016-09-14 15:26:46 +01001255 ret = arm_smmu_find_sme(smmu, sid, mask);
Robin Murphy588888a2016-09-12 17:13:54 +01001256 if (ret < 0)
1257 goto out_err;
1258
1259 idx = ret;
1260 if (smrs && smmu->s2crs[idx].count == 0) {
Robin Murphy021bb842016-09-14 15:26:46 +01001261 smrs[idx].id = sid;
1262 smrs[idx].mask = mask;
Robin Murphy588888a2016-09-12 17:13:54 +01001263 smrs[idx].valid = true;
1264 }
1265 smmu->s2crs[idx].count++;
1266 cfg->smendx[i] = (s16)idx;
1267 }
1268
1269 group = iommu_group_get_for_dev(dev);
1270 if (!group)
1271 group = ERR_PTR(-ENOMEM);
1272 if (IS_ERR(group)) {
1273 ret = PTR_ERR(group);
1274 goto out_err;
1275 }
1276 iommu_group_put(group);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001277
Will Deacon45ae7cf2013-06-24 18:31:25 +01001278 /* It worked! Now, poke the actual hardware */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001279 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy588888a2016-09-12 17:13:54 +01001280 arm_smmu_write_sme(smmu, idx);
1281 smmu->s2crs[idx].group = group;
1282 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001283
Robin Murphy588888a2016-09-12 17:13:54 +01001284 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001285 return 0;
1286
Robin Murphy588888a2016-09-12 17:13:54 +01001287out_err:
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001288 while (i--) {
Robin Murphy588888a2016-09-12 17:13:54 +01001289 arm_smmu_free_sme(smmu, cfg->smendx[i]);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001290 cfg->smendx[i] = INVALID_SMENDX;
1291 }
Robin Murphy588888a2016-09-12 17:13:54 +01001292 mutex_unlock(&smmu->stream_map_mutex);
1293 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001294}
1295
Robin Murphyadfec2e2016-09-12 17:13:55 +01001296static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001297{
Robin Murphyadfec2e2016-09-12 17:13:55 +01001298 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
1299 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphyd3097e32016-09-12 17:13:53 +01001300 int i, idx;
Will Deacon43b412b2014-07-15 11:22:24 +01001301
Robin Murphy588888a2016-09-12 17:13:54 +01001302 mutex_lock(&smmu->stream_map_mutex);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001303 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy588888a2016-09-12 17:13:54 +01001304 if (arm_smmu_free_sme(smmu, idx))
1305 arm_smmu_write_sme(smmu, idx);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001306 cfg->smendx[i] = INVALID_SMENDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001307 }
Robin Murphy588888a2016-09-12 17:13:54 +01001308 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001309}
1310
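/*
 * Route the master's stream map entries to the domain's context bank:
 * S2CR.TYPE becomes bypass for identity domains (stage
 * ARM_SMMU_DOMAIN_BYPASS) and translation otherwise, with the default
 * privilege configuration. Entries already pointing at the right place
 * are left alone.
 */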
Will Deacon45ae7cf2013-06-24 18:31:25 +01001311static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Robin Murphyadfec2e2016-09-12 17:13:55 +01001312 struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001313{
Will Deacon44680ee2014-06-25 11:29:12 +01001314 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001315 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001316 u8 cbndx = smmu_domain->cfg.cbndx;
Will Deacon61bc6712017-01-06 16:56:03 +00001317 enum arm_smmu_s2cr_type type;
Robin Murphy588888a2016-09-12 17:13:54 +01001318 int i, idx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001319
Will Deacon61bc6712017-01-06 16:56:03 +00001320 if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
1321 type = S2CR_TYPE_BYPASS;
1322 else
1323 type = S2CR_TYPE_TRANS;
1324
Robin Murphyadfec2e2016-09-12 17:13:55 +01001325 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy8e8b2032016-09-12 17:13:50 +01001326 if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
Robin Murphy588888a2016-09-12 17:13:54 +01001327 continue;
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001328
Robin Murphy8e8b2032016-09-12 17:13:50 +01001329 s2cr[idx].type = type;
Sricharan Re1989802017-01-06 18:58:15 +05301330 s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001331 s2cr[idx].cbndx = cbndx;
1332 arm_smmu_write_s2cr(smmu, idx);
Will Deacon43b412b2014-07-15 11:22:24 +01001333 }
Robin Murphy8e8b2032016-09-12 17:13:50 +01001334 return 0;
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001335}
1336
Will Deacon45ae7cf2013-06-24 18:31:25 +01001337static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1338{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001339 int ret;
Robin Murphyadfec2e2016-09-12 17:13:55 +01001340 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
1341 struct arm_smmu_device *smmu;
Joerg Roedel1d672632015-03-26 13:43:10 +01001342 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001343
Robin Murphyadfec2e2016-09-12 17:13:55 +01001344 if (!fwspec || fwspec->ops != &arm_smmu_ops) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001345 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
1346 return -ENXIO;
1347 }
1348
Robin Murphyfba4f8e2016-10-17 12:06:21 +01001349 /*
1350 * FIXME: The arch/arm DMA API code tries to attach devices to its own
1351 * domains between of_xlate() and add_device() - we have no way to cope
1352 * with that, so until ARM gets converted to rely on groups and default
1353 * domains, just say no (but more politely than by dereferencing NULL).
1354 * This should be at least a WARN_ON once that's sorted.
1355 */
1356 if (!fwspec->iommu_priv)
1357 return -ENODEV;
1358
Robin Murphyadfec2e2016-09-12 17:13:55 +01001359 smmu = fwspec_smmu(fwspec);
Will Deacon518f7132014-11-14 17:17:54 +00001360 /* Ensure that the domain is finalised */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001361 ret = arm_smmu_init_domain_context(domain, smmu);
Arnd Bergmann287980e2016-05-27 23:23:25 +02001362 if (ret < 0)
Will Deacon518f7132014-11-14 17:17:54 +00001363 return ret;
1364
Will Deacon45ae7cf2013-06-24 18:31:25 +01001365 /*
Will Deacon44680ee2014-06-25 11:29:12 +01001366 * Sanity check the domain. We don't support domains across
1367 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01001368 */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001369 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001370 dev_err(dev,
1371 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Robin Murphyadfec2e2016-09-12 17:13:55 +01001372 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001373 return -EINVAL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001374 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001375
1376 /* Looks ok, so add the device to the domain */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001377 return arm_smmu_domain_add_master(smmu_domain, fwspec);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001378}
1379
Will Deacon45ae7cf2013-06-24 18:31:25 +01001380static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00001381 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001382{
Will Deacon518f7132014-11-14 17:17:54 +00001383 int ret;
1384 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01001385 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001386 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001387
Will Deacon518f7132014-11-14 17:17:54 +00001388 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001389 return -ENODEV;
1390
Will Deacon518f7132014-11-14 17:17:54 +00001391 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1392 ret = ops->map(ops, iova, paddr, size, prot);
1393 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1394 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001395}
1396
1397static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
1398 size_t size)
1399{
Will Deacon518f7132014-11-14 17:17:54 +00001400 size_t ret;
1401 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01001402 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001403 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001404
Will Deacon518f7132014-11-14 17:17:54 +00001405 if (!ops)
1406 return 0;
1407
1408 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1409 ret = ops->unmap(ops, iova, size);
1410 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1411 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001412}
1413
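/*
 * Translate an IOVA by asking the SMMU itself: write the page-aligned
 * address to the context bank's ATS1PR register, poll ATSR.ACTIVE for
 * up to 50us, then read the result from PAR (PAR.F set means the
 * translation faulted). A poll timeout falls back to the software
 * table walk. Called with the domain's pgtbl_lock held.
 */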
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001414static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
1415 dma_addr_t iova)
1416{
Joerg Roedel1d672632015-03-26 13:43:10 +01001417 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001418 struct arm_smmu_device *smmu = smmu_domain->smmu;
1419 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1420 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1421 struct device *dev = smmu->dev;
1422 void __iomem *cb_base;
1423 u32 tmp;
1424 u64 phys;
Robin Murphy661d9622015-05-27 17:09:34 +01001425 unsigned long va;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001426
Robin Murphy452107c2017-03-30 17:56:30 +01001427 cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001428
Robin Murphy661d9622015-05-27 17:09:34 +01001429 /* ATS1 registers can only be written atomically */
1430 va = iova & ~0xfffUL;
Robin Murphy661d9622015-05-27 17:09:34 +01001431 if (smmu->version == ARM_SMMU_V2)
Robin Murphyf9a05f02016-04-13 18:13:01 +01001432 smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
1433 else /* Register is only 32-bit in v1 */
Robin Murphy661d9622015-05-27 17:09:34 +01001434 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001435
1436 if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
1437 !(tmp & ATSR_ACTIVE), 5, 50)) {
1438 dev_err(dev,
Fabio Estevam077124c2015-08-18 17:12:24 +01001439 "iova to phys timed out on %pad. Falling back to software table walk.\n",
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001440 &iova);
1441 return ops->iova_to_phys(ops, iova);
1442 }
1443
Robin Murphyf9a05f02016-04-13 18:13:01 +01001444 phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001445 if (phys & CB_PAR_F) {
1446 dev_err(dev, "translation fault!\n");
1447 dev_err(dev, "PAR = 0x%llx\n", phys);
1448 return 0;
1449 }
1450
1451 return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
1452}
1453
Will Deacon45ae7cf2013-06-24 18:31:25 +01001454static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001455 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001456{
Will Deacon518f7132014-11-14 17:17:54 +00001457 phys_addr_t ret;
1458 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01001459 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001460 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001461
Sunil Gouthambdf95922017-04-25 15:27:52 +05301462 if (domain->type == IOMMU_DOMAIN_IDENTITY)
1463 return iova;
1464
Will Deacon518f7132014-11-14 17:17:54 +00001465 if (!ops)
Will Deacona44a97912013-11-07 18:47:50 +00001466 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001467
Will Deacon518f7132014-11-14 17:17:54 +00001468 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001469 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
1470 smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001471 ret = arm_smmu_iova_to_phys_hard(domain, iova);
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001472 } else {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001473 ret = ops->iova_to_phys(ops, iova);
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001474 }
1475
Will Deacon518f7132014-11-14 17:17:54 +00001476 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001477
Will Deacon518f7132014-11-14 17:17:54 +00001478 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001479}
1480
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001481static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001482{
Will Deacond0948942014-06-24 17:30:10 +01001483 switch (cap) {
1484 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001485 /*
1486 * Return true here as the SMMU can always send out coherent
1487 * requests.
1488 */
1489 return true;
Antonios Motakis0029a8d2014-10-13 14:06:18 +01001490 case IOMMU_CAP_NOEXEC:
1491 return true;
Will Deacond0948942014-06-24 17:30:10 +01001492 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001493 return false;
Will Deacond0948942014-06-24 17:30:10 +01001494 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001495}
Will Deacon45ae7cf2013-06-24 18:31:25 +01001496
Robin Murphy021bb842016-09-14 15:26:46 +01001497static int arm_smmu_match_node(struct device *dev, void *data)
1498{
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001499 return dev->fwnode == data;
Robin Murphy021bb842016-09-14 15:26:46 +01001500}
1501
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001502static
1503struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
Robin Murphy021bb842016-09-14 15:26:46 +01001504{
1505 struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001506 fwnode, arm_smmu_match_node);
Robin Murphy021bb842016-09-14 15:26:46 +01001507 put_device(dev);
1508 return dev ? dev_get_drvdata(dev) : NULL;
1509}
1510
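/*
 * Resolve the owning SMMU (via the legacy "mmu-masters" registration,
 * or from the fwnode recorded by the generic binding), validate every
 * stream ID and mask against the limits probed from the hardware, then
 * allocate the per-master cfg and its stream map entries.
 */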
Will Deacon03edb222015-01-19 14:27:33 +00001511static int arm_smmu_add_device(struct device *dev)
1512{
Robin Murphyadfec2e2016-09-12 17:13:55 +01001513 struct arm_smmu_device *smmu;
Robin Murphyf80cd882016-09-14 15:21:39 +01001514 struct arm_smmu_master_cfg *cfg;
Robin Murphy021bb842016-09-14 15:26:46 +01001515 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Robin Murphyf80cd882016-09-14 15:21:39 +01001516 int i, ret;
1517
Robin Murphy021bb842016-09-14 15:26:46 +01001518 if (using_legacy_binding) {
1519 ret = arm_smmu_register_legacy_master(dev, &smmu);
1520 fwspec = dev->iommu_fwspec;
1521 if (ret)
1522 goto out_free;
Robin Murphy3c117b52016-11-02 17:31:32 +00001523 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001524 smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
Robin Murphy021bb842016-09-14 15:26:46 +01001525 } else {
1526 return -ENODEV;
1527 }
Robin Murphyf80cd882016-09-14 15:21:39 +01001528
1529 ret = -EINVAL;
Robin Murphyadfec2e2016-09-12 17:13:55 +01001530 for (i = 0; i < fwspec->num_ids; i++) {
1531 u16 sid = fwspec->ids[i];
Robin Murphy021bb842016-09-14 15:26:46 +01001532 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
Robin Murphyf80cd882016-09-14 15:21:39 +01001533
Robin Murphyadfec2e2016-09-12 17:13:55 +01001534 if (sid & ~smmu->streamid_mask) {
Robin Murphyf80cd882016-09-14 15:21:39 +01001535 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
Robin Murphy021bb842016-09-14 15:26:46 +01001536 sid, smmu->streamid_mask);
1537 goto out_free;
1538 }
1539 if (mask & ~smmu->smr_mask_mask) {
1540 dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
Peng Fan6323f472017-04-21 17:03:36 +08001541 mask, smmu->smr_mask_mask);
Robin Murphyf80cd882016-09-14 15:21:39 +01001542 goto out_free;
1543 }
Robin Murphyf80cd882016-09-14 15:21:39 +01001544 }
Will Deacon03edb222015-01-19 14:27:33 +00001545
Robin Murphyadfec2e2016-09-12 17:13:55 +01001546 ret = -ENOMEM;
1547 cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
1548 GFP_KERNEL);
1549 if (!cfg)
1550 goto out_free;
1551
1552 cfg->smmu = smmu;
1553 fwspec->iommu_priv = cfg;
1554 while (i--)
1555 cfg->smendx[i] = INVALID_SMENDX;
1556
Robin Murphy588888a2016-09-12 17:13:54 +01001557 ret = arm_smmu_master_alloc_smes(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001558 if (ret)
1559 goto out_free;
1560
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001561 iommu_device_link(&smmu->iommu, dev);
1562
Robin Murphyadfec2e2016-09-12 17:13:55 +01001563 return 0;
Robin Murphyf80cd882016-09-14 15:21:39 +01001564
1565out_free:
Robin Murphyadfec2e2016-09-12 17:13:55 +01001566 if (fwspec)
1567 kfree(fwspec->iommu_priv);
1568 iommu_fwspec_free(dev);
Robin Murphyf80cd882016-09-14 15:21:39 +01001569 return ret;
Will Deacon03edb222015-01-19 14:27:33 +00001570}
1571
Will Deacon45ae7cf2013-06-24 18:31:25 +01001572static void arm_smmu_remove_device(struct device *dev)
1573{
Robin Murphyadfec2e2016-09-12 17:13:55 +01001574 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001575 struct arm_smmu_master_cfg *cfg;
1576 struct arm_smmu_device *smmu;
1577
Robin Murphyadfec2e2016-09-12 17:13:55 +01001579 if (!fwspec || fwspec->ops != &arm_smmu_ops)
Robin Murphyf80cd882016-09-14 15:21:39 +01001580 return;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001581
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001582 cfg = fwspec->iommu_priv;
1583 smmu = cfg->smmu;
1584
1585 iommu_device_unlink(&smmu->iommu, dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001586 arm_smmu_master_free_smes(fwspec);
Antonios Motakis5fc63a72013-10-18 16:08:29 +01001587 iommu_group_remove_device(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001588 kfree(fwspec->iommu_priv);
1589 iommu_fwspec_free(dev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001590}
1591
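/*
 * Masters sharing a stream map entry must share an IOMMU group. If the
 * device's entries disagree about an already-assigned group there is no
 * sane way to arbitrate; otherwise reuse the existing group, or fall
 * back to the standard PCI/generic group allocation.
 */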
Joerg Roedelaf659932015-10-21 23:51:41 +02001592static struct iommu_group *arm_smmu_device_group(struct device *dev)
1593{
Robin Murphyadfec2e2016-09-12 17:13:55 +01001594 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
1595 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
Robin Murphy588888a2016-09-12 17:13:54 +01001596 struct iommu_group *group = NULL;
1597 int i, idx;
1598
Robin Murphyadfec2e2016-09-12 17:13:55 +01001599 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy588888a2016-09-12 17:13:54 +01001600 if (group && smmu->s2crs[idx].group &&
1601 group != smmu->s2crs[idx].group)
1602 return ERR_PTR(-EINVAL);
1603
1604 group = smmu->s2crs[idx].group;
1605 }
1606
1607 if (group)
Robin Murphye1b44cb2016-11-11 17:59:22 +00001608 return iommu_group_ref_get(group);
Joerg Roedelaf659932015-10-21 23:51:41 +02001609
1610 if (dev_is_pci(dev))
1611 group = pci_device_group(dev);
1612 else
1613 group = generic_device_group(dev);
1614
Joerg Roedelaf659932015-10-21 23:51:41 +02001615 return group;
1616}
1617
Will Deaconc752ce42014-06-25 22:46:31 +01001618static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
1619 enum iommu_attr attr, void *data)
1620{
Joerg Roedel1d672632015-03-26 13:43:10 +01001621 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001622
Will Deacon0834cc22017-01-06 16:28:17 +00001623 if (domain->type != IOMMU_DOMAIN_UNMANAGED)
1624 return -EINVAL;
1625
Will Deaconc752ce42014-06-25 22:46:31 +01001626 switch (attr) {
1627 case DOMAIN_ATTR_NESTING:
1628 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
1629 return 0;
1630 default:
1631 return -ENODEV;
1632 }
1633}
1634
1635static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
1636 enum iommu_attr attr, void *data)
1637{
Will Deacon518f7132014-11-14 17:17:54 +00001638 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01001639 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001640
Will Deacon0834cc22017-01-06 16:28:17 +00001641 if (domain->type != IOMMU_DOMAIN_UNMANAGED)
1642 return -EINVAL;
1643
Will Deacon518f7132014-11-14 17:17:54 +00001644 mutex_lock(&smmu_domain->init_mutex);
1645
Will Deaconc752ce42014-06-25 22:46:31 +01001646 switch (attr) {
1647 case DOMAIN_ATTR_NESTING:
Will Deacon518f7132014-11-14 17:17:54 +00001648 if (smmu_domain->smmu) {
1649 ret = -EPERM;
1650 goto out_unlock;
1651 }
1652
Will Deaconc752ce42014-06-25 22:46:31 +01001653 if (*(int *)data)
1654 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1655 else
1656 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1657
Will Deacon518f7132014-11-14 17:17:54 +00001658 break;
Will Deaconc752ce42014-06-25 22:46:31 +01001659 default:
Will Deacon518f7132014-11-14 17:17:54 +00001660 ret = -ENODEV;
Will Deaconc752ce42014-06-25 22:46:31 +01001661 }
Will Deacon518f7132014-11-14 17:17:54 +00001662
1663out_unlock:
1664 mutex_unlock(&smmu_domain->init_mutex);
1665 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01001666}
1667
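/*
 * Pack the generic DT binding's cells into a 32-bit firmware ID: the
 * StreamID in the low half and the SMR mask (an optional second cell,
 * or the "stream-match-mask" property) in the high half. As an
 * illustrative (hypothetical) example, a consumer node might carry
 * iommus = <&smmu 0x400> for a single StreamID, or
 * iommus = <&smmu 0x400 0x3f> to match the whole 0x400-0x43f range.
 */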
Robin Murphy021bb842016-09-14 15:26:46 +01001668static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
1669{
Robin Murphy56fbf602017-03-31 12:03:33 +01001670 u32 mask, fwid = 0;
Robin Murphy021bb842016-09-14 15:26:46 +01001671
1672 if (args->args_count > 0)
1673 fwid |= (u16)args->args[0];
1674
1675 if (args->args_count > 1)
1676 fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
Robin Murphy56fbf602017-03-31 12:03:33 +01001677 else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
1678 fwid |= (u16)mask << SMR_MASK_SHIFT;
Robin Murphy021bb842016-09-14 15:26:46 +01001679
1680 return iommu_fwspec_add_ids(dev, &fwid, 1);
1681}
1682
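/*
 * Reserve the software-managed MSI window (MSI_IOVA_BASE, length
 * MSI_IOVA_LENGTH) so IOVA allocation never collides with the doorbell
 * mappings created there, then add any further regions the DMA layer
 * knows about for this device.
 */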
Eric Augerf3ebee82017-01-19 20:57:55 +00001683static void arm_smmu_get_resv_regions(struct device *dev,
1684 struct list_head *head)
1685{
1686 struct iommu_resv_region *region;
1687 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
1688
1689 region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
Robin Murphy9d3a4de2017-03-16 17:00:16 +00001690 prot, IOMMU_RESV_SW_MSI);
Eric Augerf3ebee82017-01-19 20:57:55 +00001691 if (!region)
1692 return;
1693
1694 list_add_tail(&region->list, head);
Robin Murphy273df962017-03-16 17:00:19 +00001695
1696 iommu_dma_get_resv_regions(dev, head);
Eric Augerf3ebee82017-01-19 20:57:55 +00001697}
1698
1699static void arm_smmu_put_resv_regions(struct device *dev,
1700 struct list_head *head)
1701{
1702 struct iommu_resv_region *entry, *next;
1703
1704 list_for_each_entry_safe(entry, next, head, list)
1705 kfree(entry);
1706}
1707
Will Deacon518f7132014-11-14 17:17:54 +00001708static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01001709 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01001710 .domain_alloc = arm_smmu_domain_alloc,
1711 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01001712 .attach_dev = arm_smmu_attach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01001713 .map = arm_smmu_map,
1714 .unmap = arm_smmu_unmap,
Joerg Roedel76771c92014-12-02 13:07:13 +01001715 .map_sg = default_iommu_map_sg,
Will Deaconc752ce42014-06-25 22:46:31 +01001716 .iova_to_phys = arm_smmu_iova_to_phys,
1717 .add_device = arm_smmu_add_device,
1718 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02001719 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01001720 .domain_get_attr = arm_smmu_domain_get_attr,
1721 .domain_set_attr = arm_smmu_domain_set_attr,
Robin Murphy021bb842016-09-14 15:26:46 +01001722 .of_xlate = arm_smmu_of_xlate,
Eric Augerf3ebee82017-01-19 20:57:55 +00001723 .get_resv_regions = arm_smmu_get_resv_regions,
1724 .put_resv_regions = arm_smmu_put_resv_regions,
Will Deacon518f7132014-11-14 17:17:54 +00001725 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001726};
1727
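/*
 * Bring the SMMU to a known state: clear the global fault status,
 * rewrite every stream mapping entry from its software shadow, apply
 * the MMU-500 ACR/ACTLR errata workarounds, disable and clear all
 * context banks, invalidate the TLBs, then perform a global TLB sync
 * before writing the final sCR0 value (fault reporting on, TLB
 * broadcast off, unmatched streams faulting or bypassing according to
 * the disable_bypass parameter).
 */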
1728static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
1729{
1730 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001731 void __iomem *cb_base;
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001732 int i;
Peng Fan3ca37122016-05-03 21:50:30 +08001733 u32 reg, major;
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001734
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001735 /* clear global FSR */
1736 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
1737 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001738
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001739 /*
1740 * Reset stream mapping groups: Initial values mark all SMRn as
1741 * invalid and all S2CRn as bypass unless overridden.
1742 */
Robin Murphy8e8b2032016-09-12 17:13:50 +01001743 for (i = 0; i < smmu->num_mapping_groups; ++i)
1744 arm_smmu_write_sme(smmu, i);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001745
Nipun Gupta6eb18d42016-11-04 15:25:23 +05301746 if (smmu->model == ARM_MMU500) {
1747 /*
1748 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
1749 * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
1750 * bit is only present in MMU-500r2 onwards.
1751 */
1752 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
1753 major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
Peng Fan3ca37122016-05-03 21:50:30 +08001754 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
Nipun Gupta6eb18d42016-11-04 15:25:23 +05301755 if (major >= 2)
1756 reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
1757 /*
1758 * Allow unmatched Stream IDs to allocate bypass
1759 * TLB entries for reduced latency.
1760 */
1761 reg |= ARM_MMU500_ACR_SMTNMB_TLBEN;
Peng Fan3ca37122016-05-03 21:50:30 +08001762 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
1763 }
1764
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001765 /* Make sure all context banks are disabled and clear CB_FSR */
1766 for (i = 0; i < smmu->num_context_banks; ++i) {
Robin Murphy452107c2017-03-30 17:56:30 +01001767 cb_base = ARM_SMMU_CB(smmu, i);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001768 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
1769 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01001770 /*
1771 * Disable MMU-500's not-particularly-beneficial next-page
1772 * prefetcher for the sake of errata #841119 and #826419.
1773 */
1774 if (smmu->model == ARM_MMU500) {
1775 reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
1776 reg &= ~ARM_MMU500_ACTLR_CPRE;
1777 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
1778 }
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001779 }
Will Deacon1463fe42013-07-31 19:21:27 +01001780
Will Deacon45ae7cf2013-06-24 18:31:25 +01001781 /* Invalidate the TLB, just in case */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001782 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
1783 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
1784
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001785 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001786
Will Deacon45ae7cf2013-06-24 18:31:25 +01001787 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001788 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001789
1790 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001791 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001792
Robin Murphy25a1c962016-02-10 14:25:33 +00001793 /* Enable client access, handling unmatched streams as appropriate */
1794 reg &= ~sCR0_CLIENTPD;
1795 if (disable_bypass)
1796 reg |= sCR0_USFCFG;
1797 else
1798 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001799
1800 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001801 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001802
1803 /* Don't upgrade barriers */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001804 reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001805
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001806 if (smmu->features & ARM_SMMU_FEAT_VMID16)
1807 reg |= sCR0_VMID16EN;
1808
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001809 if (smmu->features & ARM_SMMU_FEAT_EXIDS)
1810 reg |= sCR0_EXIDENABLE;
1811
Will Deacon45ae7cf2013-06-24 18:31:25 +01001812 /* Push the button */
Robin Murphy11febfc2017-03-30 17:56:31 +01001813 arm_smmu_tlb_sync_global(smmu);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001814 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001815}
1816
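/* Decode the 3-bit address-size encodings (IAS/OAS/UBS) from the ID registers. */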
1817static int arm_smmu_id_size_to_bits(int size)
1818{
1819 switch (size) {
1820 case 0:
1821 return 32;
1822 case 1:
1823 return 36;
1824 case 2:
1825 return 40;
1826 case 3:
1827 return 42;
1828 case 4:
1829 return 44;
1830 case 5:
1831 default:
1832 return 48;
1833 }
1834}
1835
1836static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1837{
1838 unsigned long size;
1839 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1840 u32 id;
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001841 bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001842 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001843
1844 dev_notice(smmu->dev, "probing hardware configuration...\n");
Robin Murphyb7862e32016-04-13 18:13:03 +01001845 dev_notice(smmu->dev, "SMMUv%d with:\n",
1846 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001847
1848 /* ID0 */
1849 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01001850
1851 /* Restrict available stages based on module parameter */
1852 if (force_stage == 1)
1853 id &= ~(ID0_S2TS | ID0_NTS);
1854 else if (force_stage == 2)
1855 id &= ~(ID0_S1TS | ID0_NTS);
1856
Will Deacon45ae7cf2013-06-24 18:31:25 +01001857 if (id & ID0_S1TS) {
1858 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
1859 dev_notice(smmu->dev, "\tstage 1 translation\n");
1860 }
1861
1862 if (id & ID0_S2TS) {
1863 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
1864 dev_notice(smmu->dev, "\tstage 2 translation\n");
1865 }
1866
1867 if (id & ID0_NTS) {
1868 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
1869 dev_notice(smmu->dev, "\tnested translation\n");
1870 }
1871
1872 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01001873 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001874 dev_err(smmu->dev, "\tno translation support!\n");
1875 return -ENODEV;
1876 }
1877
Robin Murphyb7862e32016-04-13 18:13:03 +01001878 if ((id & ID0_S1TS) &&
1879 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001880 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
1881 dev_notice(smmu->dev, "\taddress translation ops\n");
1882 }
1883
Robin Murphybae2c2d2015-07-29 19:46:05 +01001884 /*
1885 * In order for DMA API calls to work properly, we must defer to what
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001886 * the FW says about coherency, regardless of what the hardware claims.
Robin Murphybae2c2d2015-07-29 19:46:05 +01001887 * Fortunately, this also opens up a workaround for systems where the
1888 * ID register value has ended up configured incorrectly.
1889 */
Robin Murphybae2c2d2015-07-29 19:46:05 +01001890 cttw_reg = !!(id & ID0_CTTW);
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001891 if (cttw_fw || cttw_reg)
Robin Murphybae2c2d2015-07-29 19:46:05 +01001892 dev_notice(smmu->dev, "\t%scoherent table walk\n",
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001893 cttw_fw ? "" : "non-");
1894 if (cttw_fw != cttw_reg)
Robin Murphybae2c2d2015-07-29 19:46:05 +01001895 dev_notice(smmu->dev,
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001896 "\t(IDR0.CTTW overridden by FW configuration)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01001897
Robin Murphy21174242016-09-12 17:13:48 +01001898 /* Max. number of entries we have for stream matching/indexing */
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001899 if (smmu->version == ARM_SMMU_V2 && id & ID0_EXIDS) {
1900 smmu->features |= ARM_SMMU_FEAT_EXIDS;
1901 size = 1 << 16;
1902 } else {
1903 size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
1904 }
Robin Murphy21174242016-09-12 17:13:48 +01001905 smmu->streamid_mask = size - 1;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001906 if (id & ID0_SMS) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001907 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
Robin Murphy21174242016-09-12 17:13:48 +01001908 size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
1909 if (size == 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001910 dev_err(smmu->dev,
1911 "stream-matching supported, but no SMRs present!\n");
1912 return -ENODEV;
1913 }
1914
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001915 /* Zero-initialised to mark as invalid */
1916 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
1917 GFP_KERNEL);
1918 if (!smmu->smrs)
1919 return -ENOMEM;
1920
Will Deacon45ae7cf2013-06-24 18:31:25 +01001921 dev_notice(smmu->dev,
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001922 "\tstream matching with %lu register groups\n", size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001923 }
Robin Murphy8e8b2032016-09-12 17:13:50 +01001924 /* s2cr->type == 0 means translation, so initialise explicitly */
1925 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
1926 GFP_KERNEL);
1927 if (!smmu->s2crs)
1928 return -ENOMEM;
1929 for (i = 0; i < size; i++)
1930 smmu->s2crs[i] = s2cr_init_val;
1931
Robin Murphy21174242016-09-12 17:13:48 +01001932 smmu->num_mapping_groups = size;
Robin Murphy588888a2016-09-12 17:13:54 +01001933 mutex_init(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001934
Robin Murphy7602b872016-04-28 17:12:09 +01001935 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
1936 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
1937 if (!(id & ID0_PTFS_NO_AARCH32S))
1938 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
1939 }
1940
Will Deacon45ae7cf2013-06-24 18:31:25 +01001941 /* ID1 */
1942 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01001943 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001944
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01001945 /* Check for size mismatch of SMMU address space from mapped region */
Will Deacon518f7132014-11-14 17:17:54 +00001946 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
Robin Murphy452107c2017-03-30 17:56:30 +01001947 size <<= smmu->pgshift;
1948 if (smmu->cb_base != gr0_base + size)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001949 dev_warn(smmu->dev,
Robin Murphy452107c2017-03-30 17:56:30 +01001950 "SMMU address space size (0x%lx) differs from mapped region size (0x%tx)!\n",
1951 size * 2, (smmu->cb_base - gr0_base) * 2);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001952
Will Deacon518f7132014-11-14 17:17:54 +00001953 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001954 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
1955 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
1956 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
1957 return -ENODEV;
1958 }
1959 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
1960 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphye086d912016-04-13 18:12:58 +01001961 /*
1962 * Cavium CN88xx erratum #27704.
1963 * Ensure ASID and VMID allocation is unique across all SMMUs in
1964 * the system.
1965 */
1966 if (smmu->model == CAVIUM_SMMUV2) {
1967 smmu->cavium_id_base =
1968 atomic_add_return(smmu->num_context_banks,
1969 &cavium_smmu_context_count);
1970 smmu->cavium_id_base -= smmu->num_context_banks;
Robert Richter53c35dce2017-03-13 11:39:01 +01001971 dev_notice(smmu->dev, "\tenabling workaround for Cavium erratum 27704\n");
Robin Murphye086d912016-04-13 18:12:58 +01001972 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001973
1974 /* ID2 */
1975 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
1976 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00001977 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001978
Will Deacon518f7132014-11-14 17:17:54 +00001979 /* The output mask is also applied for bypass */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001980 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00001981 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001982
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001983 if (id & ID2_VMID16)
1984 smmu->features |= ARM_SMMU_FEAT_VMID16;
1985
Robin Murphyf1d84542015-03-04 16:41:05 +00001986 /*
1987 * What the page table walker can address actually depends on which
1988 * descriptor format is in use, but since a) we don't know that yet,
1989 * and b) it can vary per context bank, this will have to do...
1990 */
1991 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
1992 dev_warn(smmu->dev,
1993 "failed to set DMA mask for table walker\n");
1994
Robin Murphyb7862e32016-04-13 18:13:03 +01001995 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00001996 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01001997 if (smmu->version == ARM_SMMU_V1_64K)
1998 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001999 } else {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002000 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
Will Deacon518f7132014-11-14 17:17:54 +00002001 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00002002 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01002003 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00002004 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01002005 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00002006 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01002007 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002008 }
2009
Robin Murphy7602b872016-04-28 17:12:09 +01002010 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01002011 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01002012 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01002013 if (smmu->features &
2014 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01002015 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01002016 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01002017 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01002018 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01002019 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01002020
Robin Murphyd5466352016-05-09 17:20:09 +01002021 if (arm_smmu_ops.pgsize_bitmap == -1UL)
2022 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
2023 else
2024 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
2025 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
2026 smmu->pgsize_bitmap);
2027
Will Deacon28d60072014-09-01 16:24:48 +01002029 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
2030 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
Will Deacon518f7132014-11-14 17:17:54 +00002031 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01002032
2033 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
2034 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
Will Deacon518f7132014-11-14 17:17:54 +00002035 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01002036
Will Deacon45ae7cf2013-06-24 18:31:25 +01002037 return 0;
2038}
2039
Robin Murphy67b65a32016-04-13 18:12:57 +01002040struct arm_smmu_match_data {
2041 enum arm_smmu_arch_version version;
2042 enum arm_smmu_implementation model;
2043};
2044
2045#define ARM_SMMU_MATCH_DATA(name, ver, imp) \
2046static struct arm_smmu_match_data name = { .version = ver, .model = imp }
2047
2048ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
2049ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
Robin Murphyb7862e32016-04-13 18:13:03 +01002050ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01002051ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
Robin Murphye086d912016-04-13 18:12:58 +01002052ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
Robin Murphy67b65a32016-04-13 18:12:57 +01002053
Joerg Roedel09b52692014-10-02 12:24:45 +02002054static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01002055 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
2056 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
2057 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01002058 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01002059 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01002060 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Robin Murphy09360402014-08-28 17:51:59 +01002061 { },
2062};
2063MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
2064
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002065#ifdef CONFIG_ACPI
2066static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
2067{
2068 int ret = 0;
2069
2070 switch (model) {
2071 case ACPI_IORT_SMMU_V1:
2072 case ACPI_IORT_SMMU_CORELINK_MMU400:
2073 smmu->version = ARM_SMMU_V1;
2074 smmu->model = GENERIC_SMMU;
2075 break;
2076 case ACPI_IORT_SMMU_V2:
2077 smmu->version = ARM_SMMU_V2;
2078 smmu->model = GENERIC_SMMU;
2079 break;
2080 case ACPI_IORT_SMMU_CORELINK_MMU500:
2081 smmu->version = ARM_SMMU_V2;
2082 smmu->model = ARM_MMU500;
2083 break;
2084 default:
2085 ret = -ENODEV;
2086 }
2087
2088 return ret;
2089}
2090
2091static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
2092 struct arm_smmu_device *smmu)
2093{
2094 struct device *dev = smmu->dev;
2095 struct acpi_iort_node *node =
2096 *(struct acpi_iort_node **)dev_get_platdata(dev);
2097 struct acpi_iort_smmu *iort_smmu;
2098 int ret;
2099
2100 /* Retrieve SMMU1/2 specific data */
2101 iort_smmu = (struct acpi_iort_smmu *)node->node_data;
2102
2103 ret = acpi_smmu_get_data(iort_smmu->model, smmu);
2104 if (ret < 0)
2105 return ret;
2106
2107 /* Ignore the configuration access interrupt */
2108 smmu->num_global_irqs = 1;
2109
2110 if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
2111 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
2112
2113 return 0;
2114}
2115#else
2116static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
2117 struct arm_smmu_device *smmu)
2118{
2119 return -ENODEV;
2120}
2121#endif
2122
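/*
 * Probe the DT-specific configuration: the mandatory #global-interrupts
 * count and the version/model match data. The deprecated "mmu-masters"
 * binding cannot be mixed with the generic "iommus" binding within one
 * system, so the first SMMU probed decides which style is in force and
 * any later mismatch refuses to probe.
 */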
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002123static int arm_smmu_device_dt_probe(struct platform_device *pdev,
2124 struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002125{
Robin Murphy67b65a32016-04-13 18:12:57 +01002126 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002127 struct device *dev = &pdev->dev;
Robin Murphy021bb842016-09-14 15:26:46 +01002128 bool legacy_binding;
2129
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002130 if (of_property_read_u32(dev->of_node, "#global-interrupts",
2131 &smmu->num_global_irqs)) {
2132 dev_err(dev, "missing #global-interrupts property\n");
2133 return -ENODEV;
2134 }
2135
2136 data = of_device_get_match_data(dev);
2137 smmu->version = data->version;
2138 smmu->model = data->model;
2139
2140 parse_driver_options(smmu);
2141
Robin Murphy021bb842016-09-14 15:26:46 +01002142 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
2143 if (legacy_binding && !using_generic_binding) {
2144 if (!using_legacy_binding)
2145 pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
2146 using_legacy_binding = true;
2147 } else if (!legacy_binding && !using_legacy_binding) {
2148 using_generic_binding = true;
2149 } else {
2150 dev_err(dev, "not probing due to mismatched DT properties\n");
2151 return -ENODEV;
2152 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002153
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002154 if (of_dma_is_coherent(dev->of_node))
2155 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
2156
2157 return 0;
2158}
2159
Robin Murphyf6810c12017-04-10 16:51:05 +05302160static void arm_smmu_bus_init(void)
2161{
2162 /* Oh, for a proper bus abstraction */
2163 if (!iommu_present(&platform_bus_type))
2164 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
2165#ifdef CONFIG_ARM_AMBA
2166 if (!iommu_present(&amba_bustype))
2167 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
2168#endif
2169#ifdef CONFIG_PCI
2170 if (!iommu_present(&pci_bus_type)) {
2171 pci_request_acs();
2172 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
2173 }
2174#endif
2175}
2176
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002177static int arm_smmu_device_probe(struct platform_device *pdev)
2178{
2179 struct resource *res;
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002180 resource_size_t ioaddr;
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002181 struct arm_smmu_device *smmu;
2182 struct device *dev = &pdev->dev;
2183 int num_irqs, i, err;
2184
Will Deacon45ae7cf2013-06-24 18:31:25 +01002185 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
2186 if (!smmu) {
2187 dev_err(dev, "failed to allocate arm_smmu_device\n");
2188 return -ENOMEM;
2189 }
2190 smmu->dev = dev;
2191
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002192 if (dev->of_node)
2193 err = arm_smmu_device_dt_probe(pdev, smmu);
2194 else
2195 err = arm_smmu_device_acpi_probe(pdev, smmu);
2196
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002197 if (err)
2198 return err;
Robin Murphy09360402014-08-28 17:51:59 +01002199
Will Deacon45ae7cf2013-06-24 18:31:25 +01002200 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Julia Lawall8a7f4312013-08-19 12:20:37 +01002201 smmu->base = devm_ioremap_resource(dev, res);
2202 if (IS_ERR(smmu->base))
2203 return PTR_ERR(smmu->base);
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002204 ioaddr = res->start;
Robin Murphy452107c2017-03-30 17:56:30 +01002205 smmu->cb_base = smmu->base + resource_size(res) / 2;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002206
Will Deacon45ae7cf2013-06-24 18:31:25 +01002207 num_irqs = 0;
2208 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
2209 num_irqs++;
2210 if (num_irqs > smmu->num_global_irqs)
2211 smmu->num_context_irqs++;
2212 }
2213
Andreas Herrmann44a08de2013-10-01 13:39:07 +01002214 if (!smmu->num_context_irqs) {
2215 dev_err(dev, "found %d interrupts but expected at least %d\n",
2216 num_irqs, smmu->num_global_irqs + 1);
2217 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002218 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002219
2220 smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
2221 GFP_KERNEL);
2222 if (!smmu->irqs) {
2223 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
2224 return -ENOMEM;
2225 }
2226
2227 for (i = 0; i < num_irqs; ++i) {
2228 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07002229
Will Deacon45ae7cf2013-06-24 18:31:25 +01002230 if (irq < 0) {
2231 dev_err(dev, "failed to get irq index %d\n", i);
2232 return -ENODEV;
2233 }
2234 smmu->irqs[i] = irq;
2235 }
2236
Olav Haugan3c8766d2014-08-22 17:12:32 -07002237 err = arm_smmu_device_cfg_probe(smmu);
2238 if (err)
2239 return err;
2240
Robin Murphyb7862e32016-04-13 18:13:03 +01002241 if (smmu->version == ARM_SMMU_V2 &&
Will Deacon45ae7cf2013-06-24 18:31:25 +01002242 smmu->num_context_banks != smmu->num_context_irqs) {
2243 dev_err(dev,
2244 "found only %d context interrupt(s) but %d required\n",
2245 smmu->num_context_irqs, smmu->num_context_banks);
Robin Murphyf80cd882016-09-14 15:21:39 +01002246 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002247 }
2248
Will Deacon45ae7cf2013-06-24 18:31:25 +01002249 for (i = 0; i < smmu->num_global_irqs; ++i) {
Peng Fanbee14002016-07-04 17:38:22 +08002250 err = devm_request_irq(smmu->dev, smmu->irqs[i],
2251 arm_smmu_global_fault,
2252 IRQF_SHARED,
2253 "arm-smmu global fault",
2254 smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002255 if (err) {
2256 dev_err(dev, "failed to request global IRQ %d (%u)\n",
2257 i, smmu->irqs[i]);
Robin Murphyf80cd882016-09-14 15:21:39 +01002258 return err;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002259 }
2260 }
2261
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002262 err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
2263 "smmu.%pa", &ioaddr);
2264 if (err) {
2265 dev_err(dev, "Failed to register iommu in sysfs\n");
2266 return err;
2267 }
2268
2269 iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
2270 iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
2271
2272 err = iommu_device_register(&smmu->iommu);
2273 if (err) {
2274 dev_err(dev, "Failed to register iommu\n");
2275 return err;
2276 }
2277
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002278 platform_set_drvdata(pdev, smmu);
Will Deaconfd90cec2013-08-21 13:56:34 +01002279 arm_smmu_device_reset(smmu);
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03002280 arm_smmu_test_smr_masks(smmu);
Robin Murphy021bb842016-09-14 15:26:46 +01002281
Robin Murphyf6810c12017-04-10 16:51:05 +05302282 /*
2283 * For ACPI and generic DT bindings, an SMMU will be probed before
2284 * any device which might need it, so we want the bus ops in place
2285 * ready to handle default domain setup as soon as any SMMU exists.
2286 */
2287 if (!using_legacy_binding)
2288 arm_smmu_bus_init();
2289
Will Deacon45ae7cf2013-06-24 18:31:25 +01002290 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002291}
2292
Robin Murphyf6810c12017-04-10 16:51:05 +05302293/*
2294 * With the legacy DT binding in play, though, we have no guarantees about
2295 * probe order, but then we're also not doing default domains, so we can
2296 * delay setting bus ops until we're sure every possible SMMU is ready,
2297 * and that way ensure that no add_device() calls get missed.
2298 */
2299static int arm_smmu_legacy_bus_init(void)
2300{
2301 if (using_legacy_binding)
2302 arm_smmu_bus_init();
2303 return 0;
2304}
2305device_initcall_sync(arm_smmu_legacy_bus_init);
2306
Will Deacon45ae7cf2013-06-24 18:31:25 +01002307static int arm_smmu_device_remove(struct platform_device *pdev)
2308{
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002309 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002310
2311 if (!smmu)
2312 return -ENODEV;
2313
Will Deaconecfadb62013-07-31 19:21:28 +01002314 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002315 dev_err(&pdev->dev, "removing device with active domains!\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01002316
Will Deacon45ae7cf2013-06-24 18:31:25 +01002317 /* Turn the thing off */
Mitchel Humpherys29073202014-07-08 09:52:18 -07002318 writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002319 return 0;
2320}
2321
Will Deacon45ae7cf2013-06-24 18:31:25 +01002322static struct platform_driver arm_smmu_driver = {
2323 .driver = {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002324 .name = "arm-smmu",
2325 .of_match_table = of_match_ptr(arm_smmu_of_match),
2326 },
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002327 .probe = arm_smmu_device_probe,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002328 .remove = arm_smmu_device_remove,
2329};
Robin Murphyf6810c12017-04-10 16:51:05 +05302330module_platform_driver(arm_smmu_driver);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002331
Robin Murphyf6810c12017-04-10 16:51:05 +05302332IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", NULL);
2333IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", NULL);
2334IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", NULL);
2335IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", NULL);
2336IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", NULL);
2337IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", NULL);
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002338
Will Deacon45ae7cf2013-06-24 18:31:25 +01002339MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
2340MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
2341MODULE_LICENSE("GPL v2");