/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 *	- Extended Stream ID (16 bit)
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif

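/*
 * For illustration: on a 32-bit build, smmu_write_atomic_lq(val, addr)
 * degrades to writel_relaxed() and stores only the low word. That is safe
 * because the registers written this way (such as the stage-2 TLBIIPAS2
 * ops below) keep all AArch32-relevant bits within the lower word.
 */
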
/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_EXIDENABLE			(1 << 3)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR		0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_PTFS_NO_AARCH32		(1 << 25)
#define ID0_PTFS_NO_AARCH32S		(1 << 24)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_EXIDS			(1 << 8)
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)

#define ID7_MAJOR_SHIFT			4
#define ID7_MAJOR_MASK			0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */
#define TLB_SPIN_COUNT			10

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_ID_SHIFT			0

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_EXIDVALID			(1 << 10)
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
enum arm_smmu_s2cr_type {
	S2CR_TYPE_TRANS,
	S2CR_TYPE_BYPASS,
	S2CR_TYPE_FAULT,
};

#define S2CR_PRIVCFG_SHIFT		24
#define S2CR_PRIVCFG_MASK		0x3
enum arm_smmu_s2cr_privcfg {
	S2CR_PRIVCFG_DEFAULT,
	S2CR_PRIVCFG_DIPAN,
	S2CR_PRIVCFG_UNPRIV,
	S2CR_PRIVCFG_PRIV,
};
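/*
 * The enumerators above follow the hardware encodings, so a stream-to-context
 * register image can be built directly, e.g. a faulting S2CR for context
 * bank 5:
 *
 *	reg = (S2CR_TYPE_FAULT & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT
 *	    | (5 & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT;
 *
 * which evaluates to 0x20005; see arm_smmu_write_s2cr() below.
 */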

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff

/* Translation context bank */
#define ARM_SMMU_CB(smmu, n)	((smmu)->cb_base + ((n) << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_ACTLR		0x4
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_CONTEXTIDR		0x34
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR			0x50
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FAR			0x60
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_TLBSYNC		0x7f0
#define ARM_SMMU_CB_TLBSTATUS		0x7f4
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)
#define ARM_MMU500_ACR_SMTNMB_TLBEN	(1 << 8)

#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)
#define TTBCR2_AS			(1 << 4)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

#define MSI_IOVA_BASE			0x8000000
#define MSI_IOVA_LENGTH			0x100000

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
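/*
 * For illustration: both parameters are read-only at runtime (S_IRUGO), so
 * they are normally set on the kernel command line, typically as
 * "arm-smmu.force_stage=2" or "arm-smmu.disable_bypass=1".
 */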

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
};

struct arm_smmu_s2cr {
	struct iommu_group		*group;
	int				count;
	enum arm_smmu_s2cr_type		type;
	enum arm_smmu_s2cr_privcfg	privcfg;
	u8				cbndx;
};

#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
}

struct arm_smmu_smr {
	u16				mask;
	u16				id;
	bool				valid;
};

struct arm_smmu_master_cfg {
	struct arm_smmu_device		*smmu;
	s16				smendx[];
};
#define INVALID_SMENDX			-1
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
#define fwspec_smendx(fw, i) \
	(i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
#define for_each_cfg_sme(fw, i, idx) \
	for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)
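/*
 * Usage sketch for the accessors above: iterating a master's stream map
 * entries typically looks like
 *
 *	for_each_cfg_sme(fwspec, i, idx)
 *		arm_smmu_write_sme(fwspec_smmu(fwspec), idx);
 *
 * where idx is still INVALID_SMENDX for any ID that has not yet been
 * allocated an entry (see arm_smmu_master_alloc_smes() below).
 */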

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	void __iomem			*cb_base;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
#define ARM_SMMU_FEAT_EXIDS		(1 << 12)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	u16				streamid_mask;
	u16				smr_mask_mask;
	struct arm_smmu_smr		*smrs;
	struct arm_smmu_s2cr		*s2crs;
	struct mutex			stream_map_mutex;

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	u32				cavium_id_base; /* Specific to Cavium */

	/* IOMMU core code handle */
	struct iommu_device		iommu;
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	union {
		u16			asid;
		u16			vmid;
	};
	u32				cbar;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	struct iommu_domain		domain;
};

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static bool using_legacy_binding, using_generic_binding;

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}

static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", 0)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}

static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;

static int arm_smmu_register_legacy_master(struct device *dev,
					   struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err;

	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}

static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}
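/*
 * The loop above is a lock-free allocator: find_next_zero_bit() is only a
 * hint, and if another CPU wins the race for the same bit,
 * test_and_set_bit() returns nonzero and the search retries from 'start'
 * until a free bit is claimed or the map is exhausted.
 */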

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
				void __iomem *sync, void __iomem *status)
{
	unsigned int spin_cnt, delay;

	writel_relaxed(0, sync);
	for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
		for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
			if (!(readl_relaxed(status) & sTLBGSTATUS_GSACTIVE))
				return;
			cpu_relax();
		}
		udelay(delay);
	}
	dev_err_ratelimited(smmu->dev,
			    "TLB sync timed out -- SMMU may be deadlocked\n");
}
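/*
 * Worked example of the wait above: each outer iteration polls TLBSTATUS up
 * to TLB_SPIN_COUNT (10) times, then backs off with a doubling udelay() of
 * 1, 2, 4, ... us. The delays alone sum to
 *
 *	1 + 2 + ... + 2^19 us = (2^20 - 1) us ~= 1.05 s
 *
 * before giving up, which is roughly what the "1s!" note on
 * TLB_LOOP_TIMEOUT refers to (ignoring the polling overhead itself).
 */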

static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
{
	void __iomem *base = ARM_SMMU_GR0(smmu);

	__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_GR0_sTLBGSYNC,
			    base + ARM_SMMU_GR0_sTLBGSTATUS);
}

static void arm_smmu_tlb_sync_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);

	__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_CB_TLBSYNC,
			    base + ARM_SMMU_CB_TLBSTATUS);
}

static void arm_smmu_tlb_sync_vmid(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;

	arm_smmu_tlb_sync_global(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context_s1(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *base = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);

	writel_relaxed(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
	arm_smmu_tlb_sync_context(cookie);
}

static void arm_smmu_tlb_inv_context_s2(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *base = ARM_SMMU_GR0(smmu);

	writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
	arm_smmu_tlb_sync_global(smmu);
}

static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);

	if (stage1) {
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			iova &= ~12UL;
			iova |= cfg->asid;
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)cfg->asid << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else {
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	}
}

/*
 * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
 * almost negligible, but the benefit of getting the first one in as far ahead
 * of the sync as possible is significant, hence we don't just make this a
 * no-op and set .tlb_sync to arm_smmu_tlb_inv_context_s2() as you might think.
 */
static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
					 size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	void __iomem *base = ARM_SMMU_GR0(smmu_domain->smmu);

	writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
}

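/*
 * Three flavours of TLB maintenance, as wired up below: stage-1 contexts
 * invalidate by ASID/VA and sync on their own context bank's
 * TLBSYNC/TLBSTATUS; stage-2 contexts on SMMUv2 do likewise by IPA; stage-2
 * on SMMUv1 can only invalidate by VMID via GR0, so it must use the global
 * sync register instead.
 */
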
static const struct iommu_gather_ops arm_smmu_s1_tlb_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s1,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync_context,
};

static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v2 = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync_context,
};

static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v1 = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
	.tlb_add_flush	= arm_smmu_tlb_inv_vmid_nosync,
	.tlb_sync	= arm_smmu_tlb_sync_vmid,
};

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	u32 fsr, fsynr;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);

	dev_err_ratelimited(smmu->dev,
	"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n",
			    fsr, iova, fsynr, cfg->cbndx);

	writel(fsr, cb_base + ARM_SMMU_CB_FSR);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	u32 reg, reg2;
	u64 reg64;
	bool stage1;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base, *gr1_base;

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);

	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_RW64_64BIT;
		else
			reg = CBA2R_RW64_32BIT;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= cfg->vmid << CBA2R_VMID_SHIFT;

		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version < ARM_SMMU_V2)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= cfg->vmid << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

	/*
	 * TTBCR
	 * We must write this before the TTBRs, since it determines the
	 * access behaviour of some fields (in particular, ASID[15:8]).
	 */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			reg = pgtbl_cfg->arm_v7s_cfg.tcr;
			reg2 = 0;
		} else {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
			reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			reg2 |= TTBCR2_SEP_UPSTREAM;
			if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
				reg2 |= TTBCR2_AS;
		}
		if (smmu->version > ARM_SMMU_V1)
			writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	}
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);

	/* TTBRs */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
			reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
			writel_relaxed(cfg->asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
		} else {
			reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
			reg64 |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
			writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
			reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
			reg64 |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
			writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
		}
	} else {
		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			reg = pgtbl_cfg->arm_v7s_cfg.prrr;
			reg2 = pgtbl_cfg->arm_v7s_cfg.nmrr;
		} else {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
			reg2 = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		}
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
		writel_relaxed(reg2, cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	const struct iommu_gather_ops *tlb_ops;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N              S1
	 *     S1             S1+S2            S1
	 *     S1               S2             S2
	 *     S1               S1             S1
	 *     N                N              N
	 *     N              S1+S2            S2
	 *     N                S2             S2
	 *     N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
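	/*
	 * For example, the force_stage parameter takes effect here: the
	 * probe code (not shown in this excerpt) clears
	 * ARM_SMMU_FEAT_TRANS_S1 when force_stage=2, so a requested stage-1
	 * domain falls through to stage 2 in the checks below.
	 */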
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
	    !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
	    (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		} else {
			fmt = ARM_V7S;
			ias = min(ias, 32UL);
			oas = min(oas, 32UL);
		}
		tlb_ops = &arm_smmu_s1_tlb_ops;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		if (smmu->version == ARM_SMMU_V2)
			tlb_ops = &arm_smmu_s2_tlb_ops_v2;
		else
			tlb_ops = &arm_smmu_s2_tlb_ops_v1;
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}
	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (ret < 0)
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
		cfg->vmid = cfg->cbndx + 1 + smmu->cavium_id_base;
	else
		cfg->asid = cfg->cbndx + smmu->cavium_id_base;

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= tlb_ops,
		.iommu_dev	= smmu->dev,
	};

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1UL << ias) - 1;
	domain->geometry.force_aperture = true;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
			       IRQF_SHARED, "arm-smmu-context-fault", domain);
	if (ret < 0) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;
	int irq;

	if (!smmu)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
	    iommu_get_dma_cookie(&smmu_domain->domain))) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_smr *smr = smmu->smrs + idx;
	u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;

	if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
		reg |= SMR_VALID;
	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
}

static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
	u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
		  (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
		  (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;

	if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
	    smmu->smrs[idx].valid)
		reg |= S2CR_EXIDVALID;
	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
}

static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
{
	arm_smmu_write_s2cr(smmu, idx);
	if (smmu->smrs)
		arm_smmu_write_smr(smmu, idx);
}

/*
 * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
 * should be called after sCR0 is written.
 */
static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 smr;

	if (!smmu->smrs)
		return;

	/*
	 * SMR.ID bits may not be preserved if the corresponding MASK
	 * bits are set, so check each one separately. We can reject
	 * masters later if they try to claim IDs outside these masks.
	 */
	smr = smmu->streamid_mask << SMR_ID_SHIFT;
	writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
	smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
	smmu->streamid_mask = smr >> SMR_ID_SHIFT;

	smr = smmu->streamid_mask << SMR_MASK_SHIFT;
	writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
	smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
	smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
}
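/*
 * Worked example: if the hardware implements 15-bit stream IDs but only a
 * 7-bit SMR mask, writing 0x7fff to SMR.ID reads back as 0x7fff, while
 * writing 0x7fff to SMR.MASK reads back as 0x007f; streamid_mask and
 * smr_mask_mask then record what the SMMU can actually store.
 */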

static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
{
	struct arm_smmu_smr *smrs = smmu->smrs;
	int i, free_idx = -ENOSPC;

	/* Stream indexing is blissfully easy */
	if (!smrs)
		return id;

	/* Validating SMRs is... less so */
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		if (!smrs[i].valid) {
			/*
			 * Note the first free entry we come across, which
			 * we'll claim in the end if nothing else matches.
			 */
			if (free_idx < 0)
				free_idx = i;
			continue;
		}
		/*
		 * If the new entry is _entirely_ matched by an existing entry,
		 * then reuse that, with the guarantee that there also cannot
		 * be any subsequent conflicting entries. In normal use we'd
		 * expect simply identical entries for this case, but there's
		 * no harm in accommodating the generalisation.
		 */
		if ((mask & smrs[i].mask) == mask &&
		    !((id ^ smrs[i].id) & ~smrs[i].mask))
			return i;
		/*
		 * If the new entry has any other overlap with an existing one,
		 * though, then there always exists at least one stream ID
		 * which would cause a conflict, and we can't allow that risk.
		 */
		if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
			return -EINVAL;
	}

	return free_idx;
}
1213
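/*
 * Worked example for arm_smmu_find_sme() (hypothetical values): given an
 * existing valid entry {id = 0x400, mask = 0x7f}, a new {id = 0x410,
 * mask = 0x0f} is entirely contained and reuses that index, whereas a new
 * {id = 0x400, mask = 0xff} overlaps without being contained and is
 * rejected with -EINVAL.
 */

/*
 * Drop one reference on a stream map entry. Returns true when the last
 * reference is gone and the entry has been reset to its initial state,
 * leaving the caller to decide whether it needs writing back to the
 * hardware.
 */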
static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
{
	if (--smmu->s2crs[idx].count)
		return false;

	smmu->s2crs[idx] = s2cr_init_val;
	if (smmu->smrs)
		smmu->smrs[idx].valid = false;

	return true;
}

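/*
 * Allocate stream map entries for every stream ID of a master: find (or
 * share) a suitable index for each ID under stream_map_mutex, take a
 * reference on it, and only touch the hardware once the whole set has
 * been claimed successfully.
 */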
static int arm_smmu_master_alloc_smes(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
	struct arm_smmu_device *smmu = cfg->smmu;
	struct arm_smmu_smr *smrs = smmu->smrs;
	struct iommu_group *group;
	int i, idx, ret;

	mutex_lock(&smmu->stream_map_mutex);
	/* Figure out a viable stream map entry allocation */
	for_each_cfg_sme(fwspec, i, idx) {
		u16 sid = fwspec->ids[i];
		u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;

		if (idx != INVALID_SMENDX) {
			ret = -EEXIST;
			goto out_err;
		}

		ret = arm_smmu_find_sme(smmu, sid, mask);
		if (ret < 0)
			goto out_err;

		idx = ret;
		if (smrs && smmu->s2crs[idx].count == 0) {
			smrs[idx].id = sid;
			smrs[idx].mask = mask;
			smrs[idx].valid = true;
		}
		smmu->s2crs[idx].count++;
		cfg->smendx[i] = (s16)idx;
	}

	group = iommu_group_get_for_dev(dev);
	if (!group)
		group = ERR_PTR(-ENOMEM);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto out_err;
	}
	iommu_group_put(group);

	/* It worked! Now, poke the actual hardware */
	for_each_cfg_sme(fwspec, i, idx) {
		arm_smmu_write_sme(smmu, idx);
		smmu->s2crs[idx].group = group;
	}

	mutex_unlock(&smmu->stream_map_mutex);
	return 0;

out_err:
	while (i--) {
		arm_smmu_free_sme(smmu, cfg->smendx[i]);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
	return ret;
}

static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
{
	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
	int i, idx;

	mutex_lock(&smmu->stream_map_mutex);
	for_each_cfg_sme(fwspec, i, idx) {
		if (arm_smmu_free_sme(smmu, idx))
			arm_smmu_write_sme(smmu, idx);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
}

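/*
 * Route every stream map entry of a master to the domain's context bank.
 * Entries already pointing at the right context are left alone.
 */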
static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct iommu_fwspec *fwspec)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s2cr *s2cr = smmu->s2crs;
	enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS;
	u8 cbndx = smmu_domain->cfg.cbndx;
	int i, idx;

	for_each_cfg_sme(fwspec, i, idx) {
		if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
			continue;

		s2cr[idx].type = type;
		s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
		s2cr[idx].cbndx = cbndx;
		arm_smmu_write_s2cr(smmu, idx);
	}
	return 0;
}

static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret;
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_device *smmu;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (!fwspec || fwspec->ops != &arm_smmu_ops) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/*
	 * FIXME: The arch/arm DMA API code tries to attach devices to its own
	 * domains between of_xlate() and add_device() - we have no way to cope
	 * with that, so until ARM gets converted to rely on groups and default
	 * domains, just say no (but more politely than by dereferencing NULL).
	 * This should be at least a WARN_ON once that's sorted.
	 */
	if (!fwspec->iommu_priv)
		return -ENODEV;

	smmu = fwspec_smmu(fwspec);
	/* Ensure that the domain is finalised */
	ret = arm_smmu_init_domain_context(domain, smmu);
	if (ret < 0)
		return ret;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different SMMUs.
	 */
	if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
		return -EINVAL;
	}

	/* Looks ok, so add the device to the domain */
	return arm_smmu_domain_add_master(smmu_domain, fwspec);
}

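/*
 * Map and unmap simply defer to the io-pgtable code under the domain's
 * page-table lock; any required TLB maintenance is driven from the
 * io-pgtable callbacks.
 */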
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size)
{
	size_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

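/*
 * Resolve an IOVA by asking the hardware to perform the translation: write
 * the page-aligned address to ATS1PR, poll ATSR until the walk completes,
 * then read the result back from PAR. Falls back to a software table walk
 * if the hardware doesn't respond in time.
 */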
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
	struct device *dev = smmu->dev;
	void __iomem *cb_base;
	u32 tmp;
	u64 phys;
	unsigned long va;

	cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);

	/* ATS1 registers can only be written atomically */
	va = iova & ~0xfffUL;
	if (smmu->version == ARM_SMMU_V2)
		smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
	else /* Register is only 32-bit in v1 */
		writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);

	if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
				      !(tmp & ATSR_ACTIVE), 5, 50)) {
		dev_err(dev,
			"iova to phys timed out on %pad. Falling back to software table walk.\n",
			&iova);
		return ops->iova_to_phys(ops, iova);
	}

	phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
	if (phys & CB_PAR_F) {
		dev_err(dev, "translation fault!\n");
		dev_err(dev, "PAR = 0x%llx\n", phys);
		return 0;
	}

	return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
}

static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
	    smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		ret = arm_smmu_iova_to_phys_hard(domain, iova);
	} else {
		ret = ops->iova_to_phys(ops, iova);
	}

	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	return ret;
}

static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
		return true;
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static int arm_smmu_match_node(struct device *dev, void *data)
{
	return dev->fwnode == data;
}

static
struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
{
	struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
						fwnode, arm_smmu_match_node);
	put_device(dev);
	return dev ? dev_get_drvdata(dev) : NULL;
}

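/*
 * Register a master with its SMMU. Each fwspec ID packs the stream ID in
 * the low 16 bits and an optional SMR mask in the high bits (see
 * arm_smmu_of_xlate() below); both are validated against the field widths
 * probed by arm_smmu_test_smr_masks().
 */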
static int arm_smmu_add_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_cfg *cfg;
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	int i, ret;

	if (using_legacy_binding) {
		ret = arm_smmu_register_legacy_master(dev, &smmu);
		fwspec = dev->iommu_fwspec;
		if (ret)
			goto out_free;
	} else if (fwspec && fwspec->ops == &arm_smmu_ops) {
		smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
	} else {
		return -ENODEV;
	}

	ret = -EINVAL;
	for (i = 0; i < fwspec->num_ids; i++) {
		u16 sid = fwspec->ids[i];
		u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;

		if (sid & ~smmu->streamid_mask) {
			dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
				sid, smmu->streamid_mask);
			goto out_free;
		}
		if (mask & ~smmu->smr_mask_mask) {
			dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
				mask, smmu->smr_mask_mask);
			goto out_free;
		}
	}

	ret = -ENOMEM;
	cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
		      GFP_KERNEL);
	if (!cfg)
		goto out_free;

	cfg->smmu = smmu;
	fwspec->iommu_priv = cfg;
	while (i--)
		cfg->smendx[i] = INVALID_SMENDX;

	ret = arm_smmu_master_alloc_smes(dev);
	if (ret)
		goto out_free;

	iommu_device_link(&smmu->iommu, dev);

	return 0;

out_free:
	if (fwspec)
		kfree(fwspec->iommu_priv);
	iommu_fwspec_free(dev);
	return ret;
}

static void arm_smmu_remove_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_master_cfg *cfg;
	struct arm_smmu_device *smmu;

	if (!fwspec || fwspec->ops != &arm_smmu_ops)
		return;

	cfg = fwspec->iommu_priv;
	smmu = cfg->smmu;

	iommu_device_unlink(&smmu->iommu, dev);
	arm_smmu_master_free_smes(fwspec);
	iommu_group_remove_device(dev);
	kfree(fwspec->iommu_priv);
	iommu_fwspec_free(dev);
}

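/*
 * All masters sharing a stream map entry must belong to the same group, so
 * reuse any group already recorded in the S2CRs; conflicting records mean
 * the topology is unsupportable.
 */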
static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
	struct iommu_group *group = NULL;
	int i, idx;

	for_each_cfg_sme(fwspec, i, idx) {
		if (group && smmu->s2crs[idx].group &&
		    group != smmu->s2crs[idx].group)
			return ERR_PTR(-EINVAL);

		group = smmu->s2crs[idx].group;
	}

	if (group)
		return iommu_group_ref_get(group);

	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else
		group = generic_device_group(dev);

	return group;
}

static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	default:
		return -ENODEV;
	}
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

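/*
 * Translate an "iommus" specifier into a single fwspec ID: the stream ID
 * goes in the low 16 bits and the SMR mask in the high bits. For example,
 * a (hypothetical) device-tree entry
 *
 *	iommus = <&smmu 0x400 0x7f>;
 *
 * yields fwid = 0x007f0400. When the specifier carries no mask, a
 * "stream-match-mask" property on the SMMU node is applied instead.
 */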
static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	u32 mask, fwid = 0;

	if (args->args_count > 0)
		fwid |= (u16)args->args[0];

	if (args->args_count > 1)
		fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
	else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
		fwid |= (u16)mask << SMR_MASK_SHIFT;

	return iommu_fwspec_add_ids(dev, &fwid, 1);
}

static void arm_smmu_get_resv_regions(struct device *dev,
				      struct list_head *head)
{
	struct iommu_resv_region *region;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

	region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
					 prot, IOMMU_RESV_SW_MSI);
	if (!region)
		return;

	list_add_tail(&region->list, head);
}

static void arm_smmu_put_resv_regions(struct device *dev,
				      struct list_head *head)
{
	struct iommu_resv_region *entry, *next;

	list_for_each_entry_safe(entry, next, head, list)
		kfree(entry);
}

static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.map_sg			= default_iommu_map_sg,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= arm_smmu_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.of_xlate		= arm_smmu_of_xlate,
	.get_resv_regions	= arm_smmu_get_resv_regions,
	.put_resv_regions	= arm_smmu_put_resv_regions,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};

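/*
 * Bring the SMMU to a known state: clear fault status, reset every stream
 * map entry and context bank, apply implementation errata workarounds,
 * invalidate the TLBs, and finally configure and enable the global sCR0
 * controls.
 */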
static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	void __iomem *cb_base;
	int i;
	u32 reg, major;

	/* clear global FSR */
	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);

	/*
	 * Reset stream mapping groups: Initial values mark all SMRn as
	 * invalid and all S2CRn as bypass unless overridden.
	 */
	for (i = 0; i < smmu->num_mapping_groups; ++i)
		arm_smmu_write_sme(smmu, i);

	if (smmu->model == ARM_MMU500) {
		/*
		 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
		 * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
		 * bit is only present in MMU-500r2 onwards.
		 */
		reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
		major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
		reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
		if (major >= 2)
			reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
		/*
		 * Allow unmatched Stream IDs to allocate bypass
		 * TLB entries for reduced latency.
		 */
		reg |= ARM_MMU500_ACR_SMTNMB_TLBEN;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
	}

	/* Make sure all context banks are disabled and clear CB_FSR */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		cb_base = ARM_SMMU_CB(smmu, i);
		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
		writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
		/*
		 * Disable MMU-500's not-particularly-beneficial next-page
		 * prefetcher for the sake of errata #841119 and #826419.
		 */
		if (smmu->model == ARM_MMU500) {
			reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
			reg &= ~ARM_MMU500_ACTLR_CPRE;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
		}
	}

	/* Invalidate the TLB, just in case */
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);

	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);

	/* Enable fault reporting */
	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);

	/* Disable TLB broadcasting. */
	reg |= (sCR0_VMIDPNE | sCR0_PTM);

	/* Enable client access, handling unmatched streams as appropriate */
	reg &= ~sCR0_CLIENTPD;
	if (disable_bypass)
		reg |= sCR0_USFCFG;
	else
		reg &= ~sCR0_USFCFG;

	/* Disable forced broadcasting */
	reg &= ~sCR0_FB;

	/* Don't upgrade barriers */
	reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);

	if (smmu->features & ARM_SMMU_FEAT_VMID16)
		reg |= sCR0_VMID16EN;

	if (smmu->features & ARM_SMMU_FEAT_EXIDS)
		reg |= sCR0_EXIDENABLE;

	/* Push the button */
	arm_smmu_tlb_sync_global(smmu);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}

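/* Decode the address-size field encodings found in the ID registers */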
static int arm_smmu_id_size_to_bits(int size)
{
	switch (size) {
	case 0:
		return 32;
	case 1:
		return 36;
	case 2:
		return 40;
	case 3:
		return 42;
	case 4:
		return 44;
	case 5:
	default:
		return 48;
	}
}

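/*
 * Read the ID registers and derive the SMMU's feature set: translation
 * stages, stream matching resources, address sizes and supported
 * page-table formats.
 */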
static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned long size;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 id;
	bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
	int i;

	dev_notice(smmu->dev, "probing hardware configuration...\n");
	dev_notice(smmu->dev, "SMMUv%d with:\n",
		   smmu->version == ARM_SMMU_V2 ? 2 : 1);

	/* ID0 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);

	/* Restrict available stages based on module parameter */
	if (force_stage == 1)
		id &= ~(ID0_S2TS | ID0_NTS);
	else if (force_stage == 2)
		id &= ~(ID0_S1TS | ID0_NTS);

	if (id & ID0_S1TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
		dev_notice(smmu->dev, "\tstage 1 translation\n");
	}

	if (id & ID0_S2TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
		dev_notice(smmu->dev, "\tstage 2 translation\n");
	}

	if (id & ID0_NTS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
		dev_notice(smmu->dev, "\tnested translation\n");
	}

	if (!(smmu->features &
	      (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
		dev_err(smmu->dev, "\tno translation support!\n");
		return -ENODEV;
	}

	if ((id & ID0_S1TS) &&
	    ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
		dev_notice(smmu->dev, "\taddress translation ops\n");
	}

	/*
	 * In order for DMA API calls to work properly, we must defer to what
	 * the FW says about coherency, regardless of what the hardware claims.
	 * Fortunately, this also opens up a workaround for systems where the
	 * ID register value has ended up configured incorrectly.
	 */
	cttw_reg = !!(id & ID0_CTTW);
	if (cttw_fw || cttw_reg)
		dev_notice(smmu->dev, "\t%scoherent table walk\n",
			   cttw_fw ? "" : "non-");
	if (cttw_fw != cttw_reg)
		dev_notice(smmu->dev,
			   "\t(IDR0.CTTW overridden by FW configuration)\n");

	/* Max. number of entries we have for stream matching/indexing */
	if (smmu->version == ARM_SMMU_V2 && id & ID0_EXIDS) {
		smmu->features |= ARM_SMMU_FEAT_EXIDS;
		size = 1 << 16;
	} else {
		size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
	}
	smmu->streamid_mask = size - 1;
	if (id & ID0_SMS) {
		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
		size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
		if (size == 0) {
			dev_err(smmu->dev,
				"stream-matching supported, but no SMRs present!\n");
			return -ENODEV;
		}

		/* Zero-initialised to mark as invalid */
		smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
					  GFP_KERNEL);
		if (!smmu->smrs)
			return -ENOMEM;

		dev_notice(smmu->dev,
			   "\tstream matching with %lu register groups\n", size);
	}
	/* s2cr->type == 0 means translation, so initialise explicitly */
	smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
					 GFP_KERNEL);
	if (!smmu->s2crs)
		return -ENOMEM;
	for (i = 0; i < size; i++)
		smmu->s2crs[i] = s2cr_init_val;

	smmu->num_mapping_groups = size;
	mutex_init(&smmu->stream_map_mutex);

	if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
		smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
		if (!(id & ID0_PTFS_NO_AARCH32S))
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
	}

	/* ID1 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
	smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;

	/* Check for size mismatch of SMMU address space from mapped region */
	size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
	size <<= smmu->pgshift;
	if (smmu->cb_base != gr0_base + size)
		dev_warn(smmu->dev,
			 "SMMU address space size (0x%lx) differs from mapped region size (0x%tx)!\n",
			 size * 2, (smmu->cb_base - gr0_base) * 2);

	smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
	smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
		return -ENODEV;
	}
	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
		   smmu->num_context_banks, smmu->num_s2_context_banks);
	/*
	 * Cavium CN88xx erratum #27704.
	 * Ensure ASID and VMID allocation is unique across all SMMUs in
	 * the system.
	 */
	if (smmu->model == CAVIUM_SMMUV2) {
		smmu->cavium_id_base =
			atomic_add_return(smmu->num_context_banks,
					  &cavium_smmu_context_count);
		smmu->cavium_id_base -= smmu->num_context_banks;
		dev_notice(smmu->dev, "\tenabling workaround for Cavium erratum 27704\n");
	}

	/* ID2 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
	size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
	smmu->ipa_size = size;

	/* The output mask is also applied for bypass */
	size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
	smmu->pa_size = size;

	if (id & ID2_VMID16)
		smmu->features |= ARM_SMMU_FEAT_VMID16;

	/*
	 * What the page table walker can address actually depends on which
	 * descriptor format is in use, but since a) we don't know that yet,
	 * and b) it can vary per context bank, this will have to do...
	 */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	if (smmu->version < ARM_SMMU_V2) {
		smmu->va_size = smmu->ipa_size;
		if (smmu->version == ARM_SMMU_V1_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	} else {
		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
		smmu->va_size = arm_smmu_id_size_to_bits(size);
		if (id & ID2_PTFS_4K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
		if (id & ID2_PTFS_16K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
		if (id & ID2_PTFS_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	}

	/* Now we've corralled the various formats, what'll it do? */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
		smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
	if (smmu->features &
	    (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;

	if (arm_smmu_ops.pgsize_bitmap == -1UL)
		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
	else
		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
		   smmu->pgsize_bitmap);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
			   smmu->va_size, smmu->ipa_size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
		dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
			   smmu->ipa_size, smmu->pa_size);

	return 0;
}

struct arm_smmu_match_data {
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;
};

#define ARM_SMMU_MATCH_DATA(name, ver, imp)	\
static struct arm_smmu_match_data name = { .version = ver, .model = imp }

ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
	{ .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
	{ .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
	{ .compatible = "arm,mmu-401", .data = &arm_mmu401 },
	{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
	{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);

#ifdef CONFIG_ACPI
static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
{
	int ret = 0;

	switch (model) {
	case ACPI_IORT_SMMU_V1:
	case ACPI_IORT_SMMU_CORELINK_MMU400:
		smmu->version = ARM_SMMU_V1;
		smmu->model = GENERIC_SMMU;
		break;
	case ACPI_IORT_SMMU_V2:
		smmu->version = ARM_SMMU_V2;
		smmu->model = GENERIC_SMMU;
		break;
	case ACPI_IORT_SMMU_CORELINK_MMU500:
		smmu->version = ARM_SMMU_V2;
		smmu->model = ARM_MMU500;
		break;
	default:
		ret = -ENODEV;
	}

	return ret;
}

static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
				      struct arm_smmu_device *smmu)
{
	struct device *dev = smmu->dev;
	struct acpi_iort_node *node =
		*(struct acpi_iort_node **)dev_get_platdata(dev);
	struct acpi_iort_smmu *iort_smmu;
	int ret;

	/* Retrieve SMMU1/2 specific data */
	iort_smmu = (struct acpi_iort_smmu *)node->node_data;

	ret = acpi_smmu_get_data(iort_smmu->model, smmu);
	if (ret < 0)
		return ret;

	/* Ignore the configuration access interrupt */
	smmu->num_global_irqs = 1;

	if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;

	return 0;
}
#else
static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
					     struct arm_smmu_device *smmu)
{
	return -ENODEV;
}
#endif

static int arm_smmu_device_dt_probe(struct platform_device *pdev,
				    struct arm_smmu_device *smmu)
{
	const struct arm_smmu_match_data *data;
	struct device *dev = &pdev->dev;
	bool legacy_binding;

	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
		dev_err(dev, "missing #global-interrupts property\n");
		return -ENODEV;
	}

	data = of_device_get_match_data(dev);
	smmu->version = data->version;
	smmu->model = data->model;

	parse_driver_options(smmu);

	legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
	if (legacy_binding && !using_generic_binding) {
		if (!using_legacy_binding)
			pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
		using_legacy_binding = true;
	} else if (!legacy_binding && !using_legacy_binding) {
		using_generic_binding = true;
	} else {
		dev_err(dev, "not probing due to mismatched DT properties\n");
		return -ENODEV;
	}

	if (of_dma_is_coherent(dev->of_node))
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;

	return 0;
}

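/*
 * Common probe path: gather configuration from DT or ACPI, map the
 * registers, wire up the global fault IRQs, register with the IOMMU core,
 * then reset the hardware and attach the driver to any buses that don't
 * already have an IOMMU.
 */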
static int arm_smmu_device_probe(struct platform_device *pdev)
{
	struct resource *res;
	resource_size_t ioaddr;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	int num_irqs, i, err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	if (dev->of_node)
		err = arm_smmu_device_dt_probe(pdev, smmu);
	else
		err = arm_smmu_device_acpi_probe(pdev, smmu);

	if (err)
		return err;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ioaddr = res->start;
	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);
	smmu->cb_base = smmu->base + resource_size(res) / 2;

	num_irqs = 0;
	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
		num_irqs++;
		if (num_irqs > smmu->num_global_irqs)
			smmu->num_context_irqs++;
	}

	if (!smmu->num_context_irqs) {
		dev_err(dev, "found %d interrupts but expected at least %d\n",
			num_irqs, smmu->num_global_irqs + 1);
		return -ENODEV;
	}

	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
				  GFP_KERNEL);
	if (!smmu->irqs) {
		dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
		return -ENOMEM;
	}

	for (i = 0; i < num_irqs; ++i) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0) {
			dev_err(dev, "failed to get irq index %d\n", i);
			return -ENODEV;
		}
		smmu->irqs[i] = irq;
	}

	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		return err;

	if (smmu->version == ARM_SMMU_V2 &&
	    smmu->num_context_banks != smmu->num_context_irqs) {
		dev_err(dev,
			"found only %d context interrupt(s) but %d required\n",
			smmu->num_context_irqs, smmu->num_context_banks);
		return -ENODEV;
	}

	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = devm_request_irq(smmu->dev, smmu->irqs[i],
				       arm_smmu_global_fault,
				       IRQF_SHARED,
				       "arm-smmu global fault",
				       smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			return err;
		}
	}

	err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
				     "smmu.%pa", &ioaddr);
	if (err) {
		dev_err(dev, "Failed to register iommu in sysfs\n");
		return err;
	}

	iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
	iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);

	err = iommu_device_register(&smmu->iommu);
	if (err) {
		dev_err(dev, "Failed to register iommu\n");
		return err;
	}

	platform_set_drvdata(pdev, smmu);
	arm_smmu_device_reset(smmu);
	arm_smmu_test_smr_masks(smmu);

	/* Oh, for a proper bus abstraction */
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
#ifdef CONFIG_ARM_AMBA
	if (!iommu_present(&amba_bustype))
		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
#endif
#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type)) {
		pci_request_acs();
		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
	}
#endif
	return 0;
}

static int arm_smmu_device_remove(struct platform_device *pdev)
{
	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);

	if (!smmu)
		return -ENODEV;

	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_err(&pdev->dev, "removing device with active domains!\n");

	/* Turn the thing off */
	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
	return 0;
}

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_probe,
	.remove	= arm_smmu_device_remove,
};

static int __init arm_smmu_init(void)
{
	static bool registered;
	int ret = 0;

	if (!registered) {
		ret = platform_driver_register(&arm_smmu_driver);
		registered = !ret;
	}
	return ret;
}

static void __exit arm_smmu_exit(void)
{
	return platform_driver_unregister(&arm_smmu_driver);
}

subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);

static int __init arm_smmu_of_init(struct device_node *np)
{
	int ret = arm_smmu_init();

	if (ret)
		return ret;

	if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root))
		return -ENODEV;

	return 0;
}
IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", arm_smmu_of_init);
IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", arm_smmu_of_init);
IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", arm_smmu_of_init);
IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init);
IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init);
IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init);

#ifdef CONFIG_ACPI
static int __init arm_smmu_acpi_init(struct acpi_table_header *table)
{
	if (iort_node_match(ACPI_IORT_NODE_SMMU))
		return arm_smmu_init();

	return 0;
}
IORT_ACPI_DECLARE(arm_smmu, ACPI_SIG_IORT, arm_smmu_acpi_init);
#endif

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");