/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of stream IDs assigned to a single device */
#define MAX_MASTER_STREAMIDS		MAX_PHANDLE_ARGS

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* Maximum number of mapping groups per SMMU */
#define ARM_SMMU_MAX_SMRS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

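/*
 * 32-bit kernels have no writeq, so emulate a 64-bit register write
 * with two 32-bit writes, upper half first. Note that the update is
 * not atomic in that case.
 */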
#ifdef CONFIG_64BIT
#define smmu_writeq		writeq_relaxed
#else
#define smmu_writeq(reg64, addr)				\
	do {							\
		u64 __val = (reg64);				\
		void __iomem *__addr = (addr);			\
		writel_relaxed(__val >> 32, __addr + 4);	\
		writel_relaxed(__val, __addr);			\
	} while (0)
#endif

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_MASK_MASK			0x7fff
#define SMR_ID_SHIFT			0
#define SMR_ID_MASK			0x7fff

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
#define S2CR_TYPE_TRANS			(0 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_BYPASS		(1 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_FAULT			(2 << S2CR_TYPE_SHIFT)

#define S2CR_PRIVCFG_SHIFT		24
#define S2CR_PRIVCFG_UNPRIV		(2 << S2CR_PRIVCFG_SHIFT)

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_ACTLR		0x4
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR_LO		0x50
#define ARM_SMMU_CB_PAR_HI		0x54
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FAR_LO		0x60
#define ARM_SMMU_CB_FAR_HI		0x64
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)
#define SCTLR_EAE_SBOP			(SCTLR_AFE | SCTLR_TRE)

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
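/*
 * When the driver is built in, both parameters can also be set on the
 * kernel command line, e.g. "arm_smmu.disable_bypass=1".
 */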

enum arm_smmu_arch_version {
	ARM_SMMU_V1 = 1,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
};

struct arm_smmu_smr {
	u8				idx;
	u16				mask;
	u16				id;
};

struct arm_smmu_master_cfg {
	int				num_streamids;
	u16				streamids[MAX_MASTER_STREAMIDS];
	struct arm_smmu_smr		*smrs;
};

struct arm_smmu_master {
	struct device_node		*of_node;
	struct rb_node			node;
	struct arm_smmu_master_cfg	cfg;
};

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned long			size;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	struct list_head		list;
	struct rb_root			masters;

	u32				cavium_id_base; /* Specific to Cavium */
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
};
#define INVALID_IRPTNDX			0xff

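/*
 * Map a context bank index to an ASID/VMID. cavium_id_base is zero
 * except on Cavium implementations, where it offsets the IDs so that
 * they remain unique across SMMU instances.
 */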
#define ARM_SMMU_CB_ASID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx)
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	struct iommu_domain		domain;
};

static struct iommu_ops arm_smmu_ops;

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return bus->bridge->parent->of_node;
	}

	return dev->of_node;
}

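/* Masters are kept in an rb-tree keyed by their of_node pointer. */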
static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
						struct device_node *dev_node)
{
	struct rb_node *node = smmu->masters.rb_node;

	while (node) {
		struct arm_smmu_master *master;

		master = container_of(node, struct arm_smmu_master, node);

		if (dev_node < master->of_node)
			node = node->rb_left;
		else if (dev_node > master->of_node)
			node = node->rb_right;
		else
			return master;
	}

	return NULL;
}

static struct arm_smmu_master_cfg *
find_smmu_master_cfg(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg = NULL;
	struct iommu_group *group = iommu_group_get(dev);

	if (group) {
		cfg = iommu_group_get_iommudata(group);
		iommu_group_put(group);
	}

	return cfg;
}

static int insert_smmu_master(struct arm_smmu_device *smmu,
			      struct arm_smmu_master *master)
{
	struct rb_node **new, *parent;

	new = &smmu->masters.rb_node;
	parent = NULL;
	while (*new) {
		struct arm_smmu_master *this
			= container_of(*new, struct arm_smmu_master, node);

		parent = *new;
		if (master->of_node < this->of_node)
			new = &((*new)->rb_left);
		else if (master->of_node > this->of_node)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&master->node, parent, new);
	rb_insert_color(&master->node, &smmu->masters);
	return 0;
}

static int register_smmu_master(struct arm_smmu_device *smmu,
				struct device *dev,
				struct of_phandle_args *masterspec)
{
	int i;
	struct arm_smmu_master *master;

	master = find_smmu_master(smmu, masterspec->np);
	if (master) {
		dev_err(dev,
			"rejecting multiple registrations for master device %s\n",
			masterspec->np->name);
		return -EBUSY;
	}

	if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
		dev_err(dev,
			"reached maximum number (%d) of stream IDs for master device %s\n",
			MAX_MASTER_STREAMIDS, masterspec->np->name);
		return -ENOSPC;
	}

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->of_node			= masterspec->np;
	master->cfg.num_streamids	= masterspec->args_count;

	for (i = 0; i < master->cfg.num_streamids; ++i) {
		u16 streamid = masterspec->args[i];

		if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
		     (streamid >= smmu->num_mapping_groups)) {
			dev_err(dev,
				"stream ID for master device %s greater than maximum allowed (%d)\n",
				masterspec->np->name, smmu->num_mapping_groups);
			return -ERANGE;
		}
		master->cfg.streamids[i] = streamid;
	}
	return insert_smmu_master(smmu, master);
}

static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master *master = NULL;
	struct device_node *dev_node = dev_get_dev_node(dev);

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(smmu, &arm_smmu_devices, list) {
		master = find_smmu_master(smmu, dev_node);
		if (master)
			break;
	}
	spin_unlock(&arm_smmu_devices_lock);

	return master ? smmu : NULL;
}

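/*
 * Claim a free index in @map without holding a lock: retry the
 * find/test_and_set sequence until we win the race for a clear bit.
 */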
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
	}

	__arm_smmu_tlb_sync(smmu);
}

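/*
 * Invalidate by VA: with the AArch32 register format the VA is rounded
 * down to a page boundary and the ASID occupies the low bits, whereas
 * the AArch64 format takes VA >> 12 with the ASID in bits [63:48].
 * Stage 2 invalidates by IPA on SMMUv2; SMMUv1 can only invalidate the
 * whole VMID.
 */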
static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (!IS_ENABLED(CONFIG_64BIT) || smmu->version == ARM_SMMU_V1) {
			iova &= ~0xfffUL;
			iova |= ARM_SMMU_CB_ASID(smmu, cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
#ifdef CONFIG_64BIT
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
#endif
		}
#ifdef CONFIG_64BIT
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			writeq_relaxed(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
#endif
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
	}
}

static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	int flags, ret;
	u32 fsr, far, fsynr, resume;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	if (fsr & FSR_IGN)
		dev_err_ratelimited(smmu->dev,
				    "Unexpected context fault (fsr 0x%x)\n",
				    fsr);

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

	far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_LO);
	iova = far;
#ifdef CONFIG_64BIT
	far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_HI);
	iova |= ((unsigned long)far << 32);
#endif

	if (!report_iommu_fault(domain, smmu->dev, iova, flags)) {
		ret = IRQ_HANDLED;
		resume = RESUME_RETRY;
	} else {
		dev_err_ratelimited(smmu->dev,
		    "Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n",
		    iova, fsynr, cfg->cbndx);
		ret = IRQ_NONE;
		resume = RESUME_TERMINATE;
	}

	/* Clear the faulting FSR */
	writel(fsr, cb_base + ARM_SMMU_CB_FSR);

	/* Retry or terminate any stalled transactions */
	if (fsr & FSR_SS)
		writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);

	return ret;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

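/*
 * Program a context bank from the io-pgtable configuration: CBA2R/CBAR
 * attributes, translation table bases (tagged with the ASID/VMID),
 * TTBCRs and MAIRs, then finally enable the bank via SCTLR.
 */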
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	u32 reg;
	u64 reg64;
	bool stage1;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base, *gr1_base;

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	if (smmu->version > ARM_SMMU_V1) {
#ifdef CONFIG_64BIT
		reg = CBA2R_RW64_64BIT;
#else
		reg = CBA2R_RW64_32BIT;
#endif
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;

		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version == ARM_SMMU_V1)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

	/* TTBRs */
	if (stage1) {
		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];

		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
		smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR0);

		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
		smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR1);
	} else {
		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
		smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR0);
	}

	/* TTBCR */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
		if (smmu->version > ARM_SMMU_V1) {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			reg |= TTBCR2_SEP_UPSTREAM;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
		}
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

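/*
 * Finalise a domain against a specific SMMU instance: choose a
 * translation stage, claim a context bank, allocate the page tables
 * and wire up the context fault interrupt.
 */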
static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N              S1
	 *     S1             S1+S2            S1
	 *     S1               S2             S2
	 *     S1               S1             S1
	 *     N                N              N
	 *     N              S1+S2            S2
	 *     N                S2             S2
	 *     N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (IS_ENABLED(CONFIG_64BIT))
			fmt = ARM_64_LPAE_S1;
		else
			fmt = ARM_32_LPAE_S1;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (IS_ENABLED(CONFIG_64BIT))
			fmt = ARM_64_LPAE_S2;
		else
			fmt = ARM_32_LPAE_S2;
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (IS_ERR_VALUE(ret))
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version == ARM_SMMU_V1) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= arm_smmu_ops.pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update our support page sizes to reflect the page table format */
	arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
			  "arm-smmu-context-fault", domain);
	if (IS_ERR_VALUE(ret)) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;
	int irq;

	if (!smmu)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		free_irq(irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&smmu_domain->domain)) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

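/*
 * On stream-matching SMMUs, allocate one SMR per StreamID of the
 * master and program it as an exact match (mask 0). Stream-indexing
 * SMMUs have no SMRs, so there is nothing to do.
 */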
static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_smr *smrs;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
		return 0;

	if (cfg->smrs)
		return -EEXIST;

	smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL);
	if (!smrs) {
		dev_err(smmu->dev, "failed to allocate %d SMRs\n",
			cfg->num_streamids);
		return -ENOMEM;
	}

	/* Allocate the SMRs on the SMMU */
	for (i = 0; i < cfg->num_streamids; ++i) {
		int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
						  smmu->num_mapping_groups);
		if (IS_ERR_VALUE(idx)) {
			dev_err(smmu->dev, "failed to allocate free SMR\n");
			goto err_free_smrs;
		}

		smrs[i] = (struct arm_smmu_smr) {
			.idx	= idx,
			.mask	= 0, /* We don't currently share SMRs */
			.id	= cfg->streamids[i],
		};
	}

	/* It worked! Now, poke the actual hardware */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
			  smrs[i].mask << SMR_MASK_SHIFT;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
	}

	cfg->smrs = smrs;
	return 0;

err_free_smrs:
	while (--i >= 0)
		__arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
	kfree(smrs);
	return -ENOSPC;
}

static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
				      struct arm_smmu_master_cfg *cfg)
{
	int i;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	struct arm_smmu_smr *smrs = cfg->smrs;

	if (!smrs)
		return;

	/* Invalidate the SMRs before freeing back to the allocator */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u8 idx = smrs[i].idx;

		writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
		__arm_smmu_free_bitmap(smmu->smr_map, idx);
	}

	cfg->smrs = NULL;
	kfree(smrs);
}

static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct arm_smmu_master_cfg *cfg)
{
	int i, ret;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/* Devices in an IOMMU group may already be configured */
	ret = arm_smmu_master_configure_smrs(smmu, cfg);
	if (ret)
		return ret == -EEXIST ? 0 : ret;

	/*
	 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
	 * for all devices behind the SMMU.
	 */
	if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
		return 0;

	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 idx, s2cr;

		idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
		s2cr = S2CR_TYPE_TRANS | S2CR_PRIVCFG_UNPRIV |
		       (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
		writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	return 0;
}

static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/* An IOMMU group is torn down by the first device to be removed */
	if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
		return;

	/*
	 * We *must* clear the S2CR first, because freeing the SMR means
	 * that it can be re-allocated immediately.
	 */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
		u32 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;

		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	arm_smmu_master_free_smrs(smmu, cfg);
}

static void arm_smmu_detach_dev(struct device *dev,
				struct arm_smmu_master_cfg *cfg)
{
	struct iommu_domain *domain = dev->archdata.iommu;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	dev->archdata.iommu = NULL;
	arm_smmu_domain_remove_master(smmu_domain, cfg);
}

static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_cfg *cfg;

	smmu = find_smmu_for_device(dev);
	if (!smmu) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/* Ensure that the domain is finalised */
	ret = arm_smmu_init_domain_context(domain, smmu);
	if (IS_ERR_VALUE(ret))
		return ret;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different SMMUs.
	 */
	if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
		return -EINVAL;
	}

	/* Looks ok, so add the device to the domain */
	cfg = find_smmu_master_cfg(dev);
	if (!cfg)
		return -ENODEV;

	/* Detach the dev from its current domain */
	if (dev->archdata.iommu)
		arm_smmu_detach_dev(dev, cfg);

	ret = arm_smmu_domain_add_master(smmu_domain, cfg);
	if (!ret)
		dev->archdata.iommu = domain;
	return ret;
}

static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size)
{
	size_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

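/*
 * Resolve an IOVA via the hardware ATS1PR translation operation: write
 * the page-aligned VA, poll ATSR until the walk completes, then read
 * the result back from PAR.
 */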
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
	struct device *dev = smmu->dev;
	void __iomem *cb_base;
	u32 tmp;
	u64 phys;
	unsigned long va;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	/* ATS1 registers can only be written atomically */
	va = iova & ~0xfffUL;
	if (smmu->version == ARM_SMMU_V2)
		smmu_writeq(va, cb_base + ARM_SMMU_CB_ATS1PR);
	else
		writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);

	if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
				      !(tmp & ATSR_ACTIVE), 5, 50)) {
		dev_err(dev,
			"iova to phys timed out on %pad. Falling back to software table walk.\n",
			&iova);
		return ops->iova_to_phys(ops, iova);
	}

	phys = readl_relaxed(cb_base + ARM_SMMU_CB_PAR_LO);
	phys |= ((u64)readl_relaxed(cb_base + ARM_SMMU_CB_PAR_HI)) << 32;

	if (phys & CB_PAR_F) {
		dev_err(dev, "translation fault!\n");
		dev_err(dev, "PAR = 0x%llx\n", phys);
		return 0;
	}

	return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
}

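/*
 * Only use the hardware translation above when it is safe to do so:
 * the SMMU must implement the ATS1* registers (ARM_SMMU_FEAT_TRANS_OPS)
 * and the domain must be stage-1; otherwise use the software walk.
 */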
static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
			smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		ret = arm_smmu_iova_to_phys_hard(domain, iova);
	} else {
		ret = ops->iova_to_phys(ops, iova);
	}

	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	return ret;
}

static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return true; /* MSIs are just memory writes */
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

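/*
 * pci_for_each_dma_alias() walks every requester ID the device may
 * present; the callback below records the last alias seen, which we
 * then treat as the device's Stream ID.
 */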
static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((u16 *)data) = alias;
	return 0; /* Continue walking */
}

static void __arm_smmu_release_pci_iommudata(void *data)
{
	kfree(data);
}

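/*
 * Per-group master configuration for PCI devices: the first device
 * added to a group allocates the cfg, and each subsequent device
 * appends its Stream ID, up to MAX_MASTER_STREAMIDS.
 */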
static int arm_smmu_init_pci_device(struct pci_dev *pdev,
				    struct iommu_group *group)
{
	struct arm_smmu_master_cfg *cfg;
	u16 sid;
	int i;

	cfg = iommu_group_get_iommudata(group);
	if (!cfg) {
		cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
		if (!cfg)
			return -ENOMEM;

		iommu_group_set_iommudata(group, cfg,
					  __arm_smmu_release_pci_iommudata);
	}

	if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
		return -ENOSPC;

	/*
	 * Assume Stream ID == Requester ID for now.
	 * We need a way to describe the ID mappings in FDT.
	 */
	pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
	for (i = 0; i < cfg->num_streamids; ++i)
		if (cfg->streamids[i] == sid)
			break;

	/* Avoid duplicate SIDs, as this can lead to SMR conflicts */
	if (i == cfg->num_streamids)
		cfg->streamids[cfg->num_streamids++] = sid;

	return 0;
}

static int arm_smmu_init_platform_device(struct device *dev,
					 struct iommu_group *group)
{
	struct arm_smmu_device *smmu = find_smmu_for_device(dev);
	struct arm_smmu_master *master;

	if (!smmu)
		return -ENODEV;

	master = find_smmu_master(smmu, dev->of_node);
	if (!master)
		return -ENODEV;

	iommu_group_set_iommudata(group, &master->cfg, NULL);

	return 0;
}

static int arm_smmu_add_device(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

static void arm_smmu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}

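/*
 * Group construction is bus-specific: PCI devices are grouped by DMA
 * alias via pci_device_group(), while everything else gets a group of
 * its own. The init helpers above then hang the master configuration
 * off the resulting group.
 */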
static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else
		group = generic_device_group(dev);

	if (IS_ERR(group))
		return group;

	if (dev_is_pci(dev))
		ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
	else
		ret = arm_smmu_init_platform_device(dev, group);

	if (ret) {
		iommu_group_put(group);
		group = ERR_PTR(ret);
	}

	return group;
}

static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	default:
		return -ENODEV;
	}
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.map_sg			= default_iommu_map_sg,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= arm_smmu_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};

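/*
 * Put the SMMU into a known state: clear recorded faults, point all
 * stream mappings at bypass (or fault, if bypass is disabled), disable
 * every context bank, invalidate the TLBs and finally write a sane
 * sCR0.
 */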
static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	void __iomem *cb_base;
	int i = 0;
	u32 reg;

	/* clear global FSR */
	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);

	/* Mark all SMRn as invalid and all S2CRn as bypass unless overridden */
	reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i));
	}

	/* Make sure all context banks are disabled and clear CB_FSR */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
		writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
		/*
		 * Disable MMU-500's not-particularly-beneficial next-page
		 * prefetcher for the sake of errata #841119 and #826419.
		 */
		if (smmu->model == ARM_MMU500) {
			reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
			reg &= ~ARM_MMU500_ACTLR_CPRE;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
		}
	}

	/* Invalidate the TLB, just in case */
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);

	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);

	/* Enable fault reporting */
	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);

	/* Disable TLB broadcasting. */
	reg |= (sCR0_VMIDPNE | sCR0_PTM);

	/* Enable client access, handling unmatched streams as appropriate */
	reg &= ~sCR0_CLIENTPD;
	if (disable_bypass)
		reg |= sCR0_USFCFG;
	else
		reg &= ~sCR0_USFCFG;

	/* Disable forced broadcasting */
	reg &= ~sCR0_FB;

	/* Don't upgrade barriers */
	reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);

	if (smmu->features & ARM_SMMU_FEAT_VMID16)
		reg |= sCR0_VMID16EN;

	/* Push the button */
	__arm_smmu_tlb_sync(smmu);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}

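/*
 * Decode the 3-bit address size fields of the ID registers (e.g.
 * ID2 IAS/OAS) into an address width in bits: a field value of 2,
 * for example, means 40-bit addressing. Reserved values decode to
 * the 48-bit maximum.
 */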
static int arm_smmu_id_size_to_bits(int size)
{
	switch (size) {
	case 0:
		return 32;
	case 1:
		return 36;
	case 2:
		return 40;
	case 3:
		return 42;
	case 4:
		return 44;
	case 5:
	default:
		return 48;
	}
}

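/*
 * Probe the ID registers and fill in the arm_smmu_device accordingly:
 * supported translation stages, stream-matching resources, context
 * bank counts and input/output address sizes all come from ID0-ID2.
 */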
static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned long size;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 id;
	bool cttw_dt, cttw_reg;

	dev_notice(smmu->dev, "probing hardware configuration...\n");
	dev_notice(smmu->dev, "SMMUv%d with:\n", smmu->version);

	/* ID0 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);

	/* Restrict available stages based on module parameter */
	if (force_stage == 1)
		id &= ~(ID0_S2TS | ID0_NTS);
	else if (force_stage == 2)
		id &= ~(ID0_S1TS | ID0_NTS);

	if (id & ID0_S1TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
		dev_notice(smmu->dev, "\tstage 1 translation\n");
	}

	if (id & ID0_S2TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
		dev_notice(smmu->dev, "\tstage 2 translation\n");
	}

	if (id & ID0_NTS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
		dev_notice(smmu->dev, "\tnested translation\n");
	}

	if (!(smmu->features &
		(ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
		dev_err(smmu->dev, "\tno translation support!\n");
		return -ENODEV;
	}

	if ((id & ID0_S1TS) &&
	    ((smmu->version == ARM_SMMU_V1) || !(id & ID0_ATOSNS))) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
		dev_notice(smmu->dev, "\taddress translation ops\n");
	}

	/*
	 * In order for DMA API calls to work properly, we must defer to what
	 * the DT says about coherency, regardless of what the hardware claims.
	 * Fortunately, this also opens up a workaround for systems where the
	 * ID register value has ended up configured incorrectly.
	 */
	cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
	cttw_reg = !!(id & ID0_CTTW);
	if (cttw_dt)
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
	if (cttw_dt || cttw_reg)
		dev_notice(smmu->dev, "\t%scoherent table walk\n",
			   cttw_dt ? "" : "non-");
	if (cttw_dt != cttw_reg)
		dev_notice(smmu->dev,
			   "\t(IDR0.CTTW overridden by dma-coherent property)\n");

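	/*
	 * If stream matching is supported, discover which SMR fields
	 * are actually wired up by writing all-ones to SMR0 and reading
	 * back the bits that stick; the mask must be able to cover the
	 * whole ID field for matching to be usable.
	 */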
	if (id & ID0_SMS) {
		u32 smr, sid, mask;

		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
		smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
					   ID0_NUMSMRG_MASK;
		if (smmu->num_mapping_groups == 0) {
			dev_err(smmu->dev,
				"stream-matching supported, but no SMRs present!\n");
			return -ENODEV;
		}

		smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
		smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));

		mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
		sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
		if ((mask & sid) != sid) {
			dev_err(smmu->dev,
				"SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
				mask, sid);
			return -ENODEV;
		}

		dev_notice(smmu->dev,
			   "\tstream matching with %u register groups, mask 0x%x",
			   smmu->num_mapping_groups, mask);
	} else {
		smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
					   ID0_NUMSIDB_MASK;
	}

	/* ID1 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
	smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;

	/* Check for size mismatch of SMMU address space from mapped region */
	size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
	size *= 2 << smmu->pgshift;
	if (smmu->size != size)
		dev_warn(smmu->dev,
			"SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
			size, smmu->size);

	smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
	smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
		return -ENODEV;
	}
	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
		   smmu->num_context_banks, smmu->num_s2_context_banks);
	/*
	 * Cavium CN88xx erratum #27704.
	 * Ensure ASID and VMID allocation is unique across all SMMUs in
	 * the system.
	 */
	if (smmu->model == CAVIUM_SMMUV2) {
		smmu->cavium_id_base =
			atomic_add_return(smmu->num_context_banks,
					  &cavium_smmu_context_count);
		smmu->cavium_id_base -= smmu->num_context_banks;
	}

	/* ID2 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
	size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
	smmu->ipa_size = size;

	/* The output mask is also applied for bypass */
	size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
	smmu->pa_size = size;

	if (id & ID2_VMID16)
		smmu->features |= ARM_SMMU_FEAT_VMID16;

	/*
	 * What the page table walker can address actually depends on which
	 * descriptor format is in use, but since a) we don't know that yet,
	 * and b) it can vary per context bank, this will have to do...
	 */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	if (smmu->version == ARM_SMMU_V1) {
		smmu->va_size = smmu->ipa_size;
		size = SZ_4K | SZ_2M | SZ_1G;
	} else {
		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
		smmu->va_size = arm_smmu_id_size_to_bits(size);
#ifndef CONFIG_64BIT
		smmu->va_size = min(32UL, smmu->va_size);
#endif
		size = 0;
		if (id & ID2_PTFS_4K)
			size |= SZ_4K | SZ_2M | SZ_1G;
		if (id & ID2_PTFS_16K)
			size |= SZ_16K | SZ_32M;
		if (id & ID2_PTFS_64K)
			size |= SZ_64K | SZ_512M;
	}

	arm_smmu_ops.pgsize_bitmap &= size;
	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
			   smmu->va_size, smmu->ipa_size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
		dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
			   smmu->ipa_size, smmu->pa_size);

	return 0;
}

struct arm_smmu_match_data {
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;
};

#define ARM_SMMU_MATCH_DATA(name, ver, imp)	\
static struct arm_smmu_match_data name = { .version = ver, .model = imp }

ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
	{ .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
	{ .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
	{ .compatible = "arm,mmu-401", .data = &smmu_generic_v1 },
	{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
	{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);

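/*
 * Device-tree probe: map the registers, gather the global and context
 * fault interrupts, probe the hardware configuration and register each
 * "mmu-masters" entry before finally resetting and enabling the SMMU.
 */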
static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id;
	const struct arm_smmu_match_data *data;
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	struct rb_node *node;
	struct of_phandle_args masterspec;
	int num_irqs, i, err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	of_id = of_match_node(arm_smmu_of_match, dev->of_node);
	data = of_id->data;
	smmu->version = data->version;
	smmu->model = data->model;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);
	smmu->size = resource_size(res);

	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
		dev_err(dev, "missing #global-interrupts property\n");
		return -ENODEV;
	}

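	/*
	 * Interrupts are laid out with the global fault IRQs first, so
	 * anything beyond #global-interrupts must be a context bank IRQ.
	 */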
	num_irqs = 0;
	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
		num_irqs++;
		if (num_irqs > smmu->num_global_irqs)
			smmu->num_context_irqs++;
	}

	if (!smmu->num_context_irqs) {
		dev_err(dev, "found %d interrupts but expected at least %d\n",
			num_irqs, smmu->num_global_irqs + 1);
		return -ENODEV;
	}

	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
				  GFP_KERNEL);
	if (!smmu->irqs) {
		dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
		return -ENOMEM;
	}

	for (i = 0; i < num_irqs; ++i) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0) {
			dev_err(dev, "failed to get irq index %d\n", i);
			return -ENODEV;
		}
		smmu->irqs[i] = irq;
	}

	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		return err;

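	/*
	 * Build the rb-tree of masters from the "mmu-masters" property;
	 * each entry supplies its Stream IDs via #stream-id-cells.
	 */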
	i = 0;
	smmu->masters = RB_ROOT;
	while (!of_parse_phandle_with_args(dev->of_node, "mmu-masters",
					   "#stream-id-cells", i,
					   &masterspec)) {
		err = register_smmu_master(smmu, dev, &masterspec);
		if (err) {
			dev_err(dev, "failed to add master %s\n",
				masterspec.np->name);
			goto out_put_masters;
		}

		i++;
	}
	dev_notice(dev, "registered %d master devices\n", i);

	parse_driver_options(smmu);

	if (smmu->version > ARM_SMMU_V1 &&
	    smmu->num_context_banks != smmu->num_context_irqs) {
		dev_err(dev,
			"found only %d context interrupt(s) but %d required\n",
			smmu->num_context_irqs, smmu->num_context_banks);
		err = -ENODEV;
		goto out_put_masters;
	}

	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = request_irq(smmu->irqs[i],
				  arm_smmu_global_fault,
				  IRQF_SHARED,
				  "arm-smmu global fault",
				  smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			goto out_free_irqs;
		}
	}

	INIT_LIST_HEAD(&smmu->list);
	spin_lock(&arm_smmu_devices_lock);
	list_add(&smmu->list, &arm_smmu_devices);
	spin_unlock(&arm_smmu_devices_lock);

	arm_smmu_device_reset(smmu);
	return 0;

out_free_irqs:
	while (i--)
		free_irq(smmu->irqs[i], smmu);

out_put_masters:
	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	return err;
}

static int arm_smmu_device_remove(struct platform_device *pdev)
{
	int i;
	struct device *dev = &pdev->dev;
	struct arm_smmu_device *curr, *smmu = NULL;
	struct rb_node *node;

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(curr, &arm_smmu_devices, list) {
		if (curr->dev == dev) {
			smmu = curr;
			list_del(&smmu->list);
			break;
		}
	}
	spin_unlock(&arm_smmu_devices_lock);

	if (!smmu)
		return -ENODEV;

	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_err(dev, "removing device with active domains!\n");

	for (i = 0; i < smmu->num_global_irqs; ++i)
		free_irq(smmu->irqs[i], smmu);

	/* Turn the thing off */
	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
	return 0;
}

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_dt_probe,
	.remove	= arm_smmu_device_remove,
};

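/*
 * Register the platform driver and, where no other IOMMU has already
 * claimed a bus type, install arm_smmu_ops for the platform, AMBA and
 * PCI buses.
 */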
static int __init arm_smmu_init(void)
{
	struct device_node *np;
	int ret;

	/*
	 * Play nice with systems that don't have an ARM SMMU by checking that
	 * an ARM SMMU exists in the system before proceeding with the driver
	 * and IOMMU bus operation registration.
	 */
	np = of_find_matching_node(NULL, arm_smmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	ret = platform_driver_register(&arm_smmu_driver);
	if (ret)
		return ret;

	/* Oh, for a proper bus abstraction */
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);

#ifdef CONFIG_ARM_AMBA
	if (!iommu_present(&amba_bustype))
		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
#endif

#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type))
		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
#endif

	return 0;
}

static void __exit arm_smmu_exit(void)
{
	return platform_driver_unregister(&arm_smmu_driver);
}

subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");