/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 *	- Extended Stream ID (16 bit)
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"
#include "arm-smmu-regs.h"

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)
#define ARM_MMU500_ACR_S2CRB_TLBEN	(1 << 10)
#define ARM_MMU500_ACR_SMTNMB_TLBEN	(1 << 8)

#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */
#define TLB_SPIN_COUNT			10

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif

/* Translation context bank */
#define ARM_SMMU_CB(smmu, n)	((smmu)->cb_base + ((n) << (smmu)->pgshift))

#define MSI_IOVA_BASE			0x8000000
#define MSI_IOVA_LENGTH			0x100000

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
};

struct arm_smmu_s2cr {
	struct iommu_group		*group;
	int				count;
	enum arm_smmu_s2cr_type		type;
	enum arm_smmu_s2cr_privcfg	privcfg;
	u8				cbndx;
};

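/*
 * Reset value for an unclaimed stream map entry: fault the transaction if
 * the "disable_bypass" parameter was set, otherwise let it bypass
 * translation entirely.
 */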
#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
}

struct arm_smmu_smr {
	u16				mask;
	u16				id;
	bool				valid;
};

struct arm_smmu_cb {
	u64				ttbr[2];
	u32				tcr[2];
	u32				mair[2];
	struct arm_smmu_cfg		*cfg;
};

struct arm_smmu_master_cfg {
	struct arm_smmu_device		*smmu;
	s16				smendx[];
};
#define INVALID_SMENDX			-1
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
#define fwspec_smendx(fw, i) \
	(i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
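/*
 * Iterate over each stream map entry index backing a fwspec. The comma
 * operator in the loop condition ensures idx is assigned (possibly to
 * INVALID_SMENDX) before the bounds check runs, so the loop body always
 * sees a well-defined value.
 */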
#define for_each_cfg_sme(fw, i, idx) \
	for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	void __iomem			*cb_base;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
#define ARM_SMMU_FEAT_EXIDS		(1 << 12)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	struct arm_smmu_cb		*cbs;
	atomic_t			irptndx;

	u32				num_mapping_groups;
	u16				streamid_mask;
	u16				smr_mask_mask;
	struct arm_smmu_smr		*smrs;
	struct arm_smmu_s2cr		*s2crs;
	struct mutex			stream_map_mutex;

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	u32				cavium_id_base; /* Specific to Cavium */

	spinlock_t			global_sync_lock;

	/* IOMMU core code handle */
	struct iommu_device		iommu;
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	union {
		u16			asid;
		u16			vmid;
	};
	u32				cbar;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
	ARM_SMMU_DOMAIN_BYPASS,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	const struct iommu_gather_ops	*tlb_ops;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	spinlock_t			cb_lock; /* Serialises ATS1* ops and TLB syncs */
	struct iommu_domain		domain;
};

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static bool using_legacy_binding, using_generic_binding;

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}

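/*
 * Called via driver_for_each_device(): "data" carries a pointer to the
 * phandle iterator on the way in and is overwritten with the matching
 * SMMU device on the way out; returning 1 terminates the walk early.
 */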
static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", 0)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}

static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;

static int arm_smmu_register_legacy_master(struct device *dev,
					   struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err;

	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}

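/*
 * Lock-free bitmap allocator: find_next_zero_bit() is only a hint, so
 * retry the search if test_and_set_bit() shows the slot was claimed
 * concurrently.
 */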
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
				void __iomem *sync, void __iomem *status)
{
	unsigned int spin_cnt, delay;

	writel_relaxed(0, sync);
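	/* Poll hard for a while, then back off with exponentially growing delays */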
	for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
		for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
			if (!(readl_relaxed(status) & sTLBGSTATUS_GSACTIVE))
				return;
			cpu_relax();
		}
		udelay(delay);
	}
	dev_err_ratelimited(smmu->dev,
			    "TLB sync timed out -- SMMU may be deadlocked\n");
}

static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
{
	void __iomem *base = ARM_SMMU_GR0(smmu);
	unsigned long flags;

	spin_lock_irqsave(&smmu->global_sync_lock, flags);
	__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_GR0_sTLBGSYNC,
			    base + ARM_SMMU_GR0_sTLBGSTATUS);
	spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
}

static void arm_smmu_tlb_sync_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
	unsigned long flags;

	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
	__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_CB_TLBSYNC,
			    base + ARM_SMMU_CB_TLBSTATUS);
	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
}

static void arm_smmu_tlb_sync_vmid(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;

	arm_smmu_tlb_sync_global(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context_s1(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *base = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);

	writel_relaxed(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
	arm_smmu_tlb_sync_context(cookie);
}

static void arm_smmu_tlb_inv_context_s2(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *base = ARM_SMMU_GR0(smmu);

	writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
	arm_smmu_tlb_sync_global(smmu);
}

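/*
 * Issue one TLB invalidation per granule in the range, without waiting for
 * completion. For stage 1, the ASID is encoded into the low bits of the
 * 32-bit TLBIVA address (AArch32 formats) or into bits [63:48] of the
 * 64-bit write (AArch64 format); stage 2 invalidates by IPA.
 */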
static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);

	if (stage1) {
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			/* Mask off the page offset so the ASID can live in the low bits */
			iova = (iova >> 12) << 12;
			iova |= cfg->asid;
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)cfg->asid << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else {
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	}
}

/*
 * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
 * almost negligible, but the benefit of getting the first one in as far ahead
 * of the sync as possible is significant, hence we don't just make this a
 * no-op and set .tlb_sync to arm_smmu_tlb_inv_context_s2() as you might think.
 */
static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
					 size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	void __iomem *base = ARM_SMMU_GR0(smmu_domain->smmu);

	writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
}

static const struct iommu_gather_ops arm_smmu_s1_tlb_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s1,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync_context,
};

static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v2 = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync_context,
};

static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v1 = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
	.tlb_add_flush	= arm_smmu_tlb_inv_vmid_nosync,
	.tlb_sync	= arm_smmu_tlb_sync_vmid,
};

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	u32 fsr, fsynr;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);

	dev_err_ratelimited(smmu->dev,
	"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n",
			    fsr, iova, fsynr, cfg->cbndx);

	writel(fsr, cb_base + ARM_SMMU_CB_FSR);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

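/*
 * Compute the register values for a context bank and stash them in the
 * shadow struct arm_smmu_cb; arm_smmu_write_context_bank() below pushes
 * the cached state out to the hardware registers.
 */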
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	cb->cfg = cfg;

	/* TTBCR */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr;
		} else {
			cb->tcr[0] = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
			cb->tcr[1] = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			cb->tcr[1] |= TTBCR2_SEP_UPSTREAM;
			if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
				cb->tcr[1] |= TTBCR2_AS;
		}
	} else {
		cb->tcr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	}

	/* TTBRs */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
			cb->ttbr[1] = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
		} else {
			cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
			cb->ttbr[0] |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
			cb->ttbr[1] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
			cb->ttbr[1] |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
		}
	} else {
		cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr;
			cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr;
		} else {
			cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
			cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		}
	}
}

static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
{
	u32 reg;
	bool stage1;
	struct arm_smmu_cb *cb = &smmu->cbs[idx];
	struct arm_smmu_cfg *cfg = cb->cfg;
	void __iomem *cb_base, *gr1_base;

	cb_base = ARM_SMMU_CB(smmu, idx);

	/* Unassigned context banks only need disabling */
	if (!cfg) {
		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
		return;
	}

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	/* CBA2R */
	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_RW64_64BIT;
		else
			reg = CBA2R_RW64_32BIT;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= cfg->vmid << CBA2R_VMID_SHIFT;

		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(idx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version < ARM_SMMU_V2)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= cfg->vmid << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(idx));

	/*
	 * TTBCR
	 * We must write this before the TTBRs, since it determines the
	 * access behaviour of some fields (in particular, ASID[15:8]).
	 */
	if (stage1 && smmu->version > ARM_SMMU_V1)
		writel_relaxed(cb->tcr[1], cb_base + ARM_SMMU_CB_TTBCR2);
	writel_relaxed(cb->tcr[0], cb_base + ARM_SMMU_CB_TTBCR);

	/* TTBRs */
	if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
		writel_relaxed(cfg->asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
		writel_relaxed(cb->ttbr[0], cb_base + ARM_SMMU_CB_TTBR0);
		writel_relaxed(cb->ttbr[1], cb_base + ARM_SMMU_CB_TTBR1);
	} else {
		writeq_relaxed(cb->ttbr[0], cb_base + ARM_SMMU_CB_TTBR0);
		if (stage1)
			writeq_relaxed(cb->ttbr[1], cb_base + ARM_SMMU_CB_TTBR1);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		writel_relaxed(cb->mair[0], cb_base + ARM_SMMU_CB_S1_MAIR0);
		writel_relaxed(cb->mair[1], cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= SCTLR_E;

	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
		smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N              S1
	 *     S1             S1+S2            S1
	 *     S1               S2             S2
	 *     S1               S1             S1
	 *     N                N              N
	 *     N              S1+S2            S2
	 *     N                S2             S2
	 *     N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
	    !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
	    (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		} else {
			fmt = ARM_V7S;
			ias = min(ias, 32UL);
			oas = min(oas, 32UL);
		}
		smmu_domain->tlb_ops = &arm_smmu_s1_tlb_ops;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		if (smmu->version == ARM_SMMU_V2)
			smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v2;
		else
			smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v1;
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}
	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (ret < 0)
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
		cfg->vmid = cfg->cbndx + 1 + smmu->cavium_id_base;
	else
		cfg->asid = cfg->cbndx + smmu->cavium_id_base;

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= smmu_domain->tlb_ops,
		.iommu_dev	= smmu->dev,
	};

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA;

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1UL << ias) - 1;
	domain->geometry.force_aperture = true;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
			       IRQF_SHARED, "arm-smmu-context-fault", domain);
	if (ret < 0) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	int irq;

	if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	smmu->cbs[cfg->cbndx].cfg = NULL;
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED &&
	    type != IOMMU_DOMAIN_DMA &&
	    type != IOMMU_DOMAIN_IDENTITY)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
	    iommu_get_dma_cookie(&smmu_domain->domain))) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->cb_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

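/*
 * With the Extended Stream ID (EXIDS) feature, the valid bit moves from
 * SMR.VALID to S2CR.EXIDVALID, which is why the two write routines below
 * check the feature flag before deciding where to encode validity.
 */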
static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_smr *smr = smmu->smrs + idx;
	u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;

	if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
		reg |= SMR_VALID;
	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
}

static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
	u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
		  (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
		  (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;

	if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
	    smmu->smrs[idx].valid)
		reg |= S2CR_EXIDVALID;
	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
}

static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
{
	arm_smmu_write_s2cr(smmu, idx);
	if (smmu->smrs)
		arm_smmu_write_smr(smmu, idx);
}

/*
 * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
 * should be called after sCR0 is written.
 */
static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 smr;

	if (!smmu->smrs)
		return;

	/*
	 * SMR.ID bits may not be preserved if the corresponding MASK
	 * bits are set, so check each one separately. We can reject
	 * masters later if they try to claim IDs outside these masks.
	 */
	smr = smmu->streamid_mask << SMR_ID_SHIFT;
	writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
	smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
	smmu->streamid_mask = smr >> SMR_ID_SHIFT;

	smr = smmu->streamid_mask << SMR_MASK_SHIFT;
	writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
	smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
	smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
}

static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
{
	struct arm_smmu_smr *smrs = smmu->smrs;
	int i, free_idx = -ENOSPC;

	/* Stream indexing is blissfully easy */
	if (!smrs)
		return id;

	/* Validating SMRs is... less so */
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		if (!smrs[i].valid) {
			/*
			 * Note the first free entry we come across, which
			 * we'll claim in the end if nothing else matches.
			 */
			if (free_idx < 0)
				free_idx = i;
			continue;
		}
		/*
		 * If the new entry is _entirely_ matched by an existing entry,
		 * then reuse that, with the guarantee that there also cannot
		 * be any subsequent conflicting entries. In normal use we'd
		 * expect simply identical entries for this case, but there's
		 * no harm in accommodating the generalisation.
		 */
		if ((mask & smrs[i].mask) == mask &&
		    !((id ^ smrs[i].id) & ~smrs[i].mask))
			return i;
		/*
		 * If the new entry has any other overlap with an existing one,
		 * though, then there always exists at least one stream ID
		 * which would cause a conflict, and we can't allow that risk.
		 */
		if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
			return -EINVAL;
	}

	return free_idx;
}

static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
{
	if (--smmu->s2crs[idx].count)
		return false;

	smmu->s2crs[idx] = s2cr_init_val;
	if (smmu->smrs)
		smmu->smrs[idx].valid = false;

	return true;
}

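/*
 * Reserve stream map entries for every ID in the fwspec before touching
 * the hardware, so that a partial failure can be rolled back without ever
 * exposing a half-programmed master; all of this runs under
 * stream_map_mutex.
 */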
static int arm_smmu_master_alloc_smes(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
	struct arm_smmu_device *smmu = cfg->smmu;
	struct arm_smmu_smr *smrs = smmu->smrs;
	struct iommu_group *group;
	int i, idx, ret;

	mutex_lock(&smmu->stream_map_mutex);
	/* Figure out a viable stream map entry allocation */
	for_each_cfg_sme(fwspec, i, idx) {
		u16 sid = fwspec->ids[i];
		u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;

		if (idx != INVALID_SMENDX) {
			ret = -EEXIST;
			goto out_err;
		}

		ret = arm_smmu_find_sme(smmu, sid, mask);
		if (ret < 0)
			goto out_err;

		idx = ret;
		if (smrs && smmu->s2crs[idx].count == 0) {
			smrs[idx].id = sid;
			smrs[idx].mask = mask;
			smrs[idx].valid = true;
		}
		smmu->s2crs[idx].count++;
		cfg->smendx[i] = (s16)idx;
	}

	group = iommu_group_get_for_dev(dev);
	if (!group)
		group = ERR_PTR(-ENOMEM);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto out_err;
	}
	iommu_group_put(group);

	/* It worked! Now, poke the actual hardware */
	for_each_cfg_sme(fwspec, i, idx) {
		arm_smmu_write_sme(smmu, idx);
		smmu->s2crs[idx].group = group;
	}

	mutex_unlock(&smmu->stream_map_mutex);
	return 0;

out_err:
	while (i--) {
		arm_smmu_free_sme(smmu, cfg->smendx[i]);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
	return ret;
}

static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
{
	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
	int i, idx;

	mutex_lock(&smmu->stream_map_mutex);
	for_each_cfg_sme(fwspec, i, idx) {
		if (arm_smmu_free_sme(smmu, idx))
			arm_smmu_write_sme(smmu, idx);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
}

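/*
 * Point every stream map entry of this master at the domain's context
 * bank (or straight at bypass for an identity domain), skipping entries
 * that are already programmed appropriately.
 */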
static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct iommu_fwspec *fwspec)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s2cr *s2cr = smmu->s2crs;
	u8 cbndx = smmu_domain->cfg.cbndx;
	enum arm_smmu_s2cr_type type;
	int i, idx;

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
		type = S2CR_TYPE_BYPASS;
	else
		type = S2CR_TYPE_TRANS;

	for_each_cfg_sme(fwspec, i, idx) {
		if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
			continue;

		s2cr[idx].type = type;
		s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
		s2cr[idx].cbndx = cbndx;
		arm_smmu_write_s2cr(smmu, idx);
	}
	return 0;
}

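/*
 * Attach a master to an IOMMU domain: finalise the domain against this
 * SMMU instance on first use, then steer the master's stream map entries
 * at the resulting context bank.
 */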
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret;
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_device *smmu;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (!fwspec || fwspec->ops != &arm_smmu_ops) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/*
	 * FIXME: The arch/arm DMA API code tries to attach devices to its own
	 * domains between of_xlate() and add_device() - we have no way to cope
	 * with that, so until ARM gets converted to rely on groups and default
	 * domains, just say no (but more politely than by dereferencing NULL).
	 * This should be at least a WARN_ON once that's sorted.
	 */
	if (!fwspec->iommu_priv)
		return -ENODEV;

	smmu = fwspec_smmu(fwspec);
	/* Ensure that the domain is finalised */
	ret = arm_smmu_init_domain_context(domain, smmu);
	if (ret < 0)
		return ret;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different SMMUs.
	 */
	if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
		return -EINVAL;
	}

	/* Looks ok, so add the device to the domain */
	return arm_smmu_domain_add_master(smmu_domain, fwspec);
}

static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	return ops->map(ops, iova, paddr, size, prot);
}

static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size)
{
	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;

	if (!ops)
		return 0;

	return ops->unmap(ops, iova, size);
}

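/*
 * Map and unmap defer entirely to the io-pgtable code; TLB invalidations
 * queued up by unmaps are only pushed out to the hardware when the core
 * IOMMU layer calls back in here to synchronise.
 */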
static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (smmu_domain->tlb_ops)
		smmu_domain->tlb_ops->tlb_sync(smmu_domain);
}

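/*
 * Resolve an IOVA by asking the hardware itself: write the page-aligned
 * address to the context bank's ATS1PR register, poll ATSR until the
 * translation completes, then read the result back from PAR. Falls back
 * to a software table walk if the hardware times out.
 */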
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
	struct device *dev = smmu->dev;
	void __iomem *cb_base;
	u32 tmp;
	u64 phys;
	unsigned long va, flags;

	cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);

	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
	/* ATS1 registers can only be written atomically */
	va = iova & ~0xfffUL;
	if (smmu->version == ARM_SMMU_V2)
		smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
	else /* Register is only 32-bit in v1 */
		writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);

	if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
				      !(tmp & ATSR_ACTIVE), 5, 50)) {
		spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
		dev_err(dev,
			"iova to phys timed out on %pad. Falling back to software table walk.\n",
			&iova);
		return ops->iova_to_phys(ops, iova);
	}

	phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
	if (phys & CB_PAR_F) {
		dev_err(dev, "translation fault!\n");
		dev_err(dev, "PAR = 0x%llx\n", phys);
		return 0;
	}

	return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
}

static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (domain->type == IOMMU_DOMAIN_IDENTITY)
		return iova;

	if (!ops)
		return 0;

	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
			smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
		return arm_smmu_iova_to_phys_hard(domain, iova);

	return ops->iova_to_phys(ops, iova);
}

static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
		return true;
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static int arm_smmu_match_node(struct device *dev, void *data)
{
	return dev->fwnode == data;
}

static
struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
{
	struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
						fwnode, arm_smmu_match_node);
	put_device(dev);
	return dev ? dev_get_drvdata(dev) : NULL;
}

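/*
 * Probe-time hook for each master: locate the SMMU instance this device
 * sits behind (via the legacy "mmu-masters" binding or the generic
 * firmware node), validate its StreamIDs and SMR masks against the
 * hardware limits, and claim stream map entries for it.
 */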
static int arm_smmu_add_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_cfg *cfg;
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	int i, ret;

	if (using_legacy_binding) {
		ret = arm_smmu_register_legacy_master(dev, &smmu);

		/*
		 * If dev->iommu_fwspec is initially NULL, arm_smmu_register_legacy_master()
		 * will allocate/initialise a new one. Thus we need to update fwspec for
		 * later use.
		 */
		fwspec = dev->iommu_fwspec;
		if (ret)
			goto out_free;
	} else if (fwspec && fwspec->ops == &arm_smmu_ops) {
		smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
	} else {
		return -ENODEV;
	}

	ret = -EINVAL;
	for (i = 0; i < fwspec->num_ids; i++) {
		u16 sid = fwspec->ids[i];
		u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;

		if (sid & ~smmu->streamid_mask) {
			dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
				sid, smmu->streamid_mask);
			goto out_free;
		}
		if (mask & ~smmu->smr_mask_mask) {
			dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
				mask, smmu->smr_mask_mask);
			goto out_free;
		}
	}

	ret = -ENOMEM;
	cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
		      GFP_KERNEL);
	if (!cfg)
		goto out_free;

	cfg->smmu = smmu;
	fwspec->iommu_priv = cfg;
	while (i--)
		cfg->smendx[i] = INVALID_SMENDX;

	ret = arm_smmu_master_alloc_smes(dev);
	if (ret)
		goto out_cfg_free;

	iommu_device_link(&smmu->iommu, dev);

	return 0;

out_cfg_free:
	kfree(cfg);
out_free:
	iommu_fwspec_free(dev);
	return ret;
}

static void arm_smmu_remove_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_master_cfg *cfg;
	struct arm_smmu_device *smmu;

	if (!fwspec || fwspec->ops != &arm_smmu_ops)
		return;

	cfg = fwspec->iommu_priv;
	smmu = cfg->smmu;

	iommu_device_unlink(&smmu->iommu, dev);
	arm_smmu_master_free_smes(fwspec);
	iommu_group_remove_device(dev);
	kfree(fwspec->iommu_priv);
	iommu_fwspec_free(dev);
}

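/*
 * Masters whose StreamIDs alias onto the same stream map entries must
 * share an iommu_group: reuse any group already recorded against one of
 * this master's entries, refuse configurations that would straddle two
 * distinct groups, and otherwise fall back to a fresh PCI or generic
 * group.
 */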
static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
	struct iommu_group *group = NULL;
	int i, idx;

	for_each_cfg_sme(fwspec, i, idx) {
		if (group && smmu->s2crs[idx].group &&
		    group != smmu->s2crs[idx].group)
			return ERR_PTR(-EINVAL);

		group = smmu->s2crs[idx].group;
	}

	if (group)
		return iommu_group_ref_get(group);

	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else
		group = generic_device_group(dev);

	return group;
}

static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	default:
		return -ENODEV;
	}
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

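/*
 * Translate a firmware "iommus" specifier into a single fwspec ID: the
 * StreamID occupies the low 16 bits, with an optional SMR mask (from a
 * second cell, or from the "stream-match-mask" SMMU property) in the high
 * 16 bits. As a hypothetical DT example,
 *
 *	iommus = <&smmu 0x400 0x7f00>;
 *
 * would describe a master matched on StreamID 0x400 with the bits set in
 * 0x7f00 ignored during comparison.
 */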
static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	u32 mask, fwid = 0;

	if (args->args_count > 0)
		fwid |= (u16)args->args[0];

	if (args->args_count > 1)
		fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
	else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
		fwid |= (u16)mask << SMR_MASK_SHIFT;

	return iommu_fwspec_add_ids(dev, &fwid, 1);
}

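/*
 * Advertise a software-managed MSI window (MSI_IOVA_BASE onwards) as a
 * reserved region in every domain, so that IOVAs handed out for DMA never
 * collide with the mappings the MSI layer creates for doorbell writes.
 */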
static void arm_smmu_get_resv_regions(struct device *dev,
				      struct list_head *head)
{
	struct iommu_resv_region *region;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

	region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
					 prot, IOMMU_RESV_SW_MSI);
	if (!region)
		return;

	list_add_tail(&region->list, head);

	iommu_dma_get_resv_regions(dev, head);
}

static void arm_smmu_put_resv_regions(struct device *dev,
				      struct list_head *head)
{
	struct iommu_resv_region *entry, *next;

	list_for_each_entry_safe(entry, next, head, list)
		kfree(entry);
}

static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.map_sg			= default_iommu_map_sg,
	.flush_iotlb_all	= arm_smmu_iotlb_sync,
	.iotlb_sync		= arm_smmu_iotlb_sync,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= arm_smmu_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.of_xlate		= arm_smmu_of_xlate,
	.get_resv_regions	= arm_smmu_get_resv_regions,
	.put_resv_regions	= arm_smmu_put_resv_regions,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};

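/*
 * Bring the SMMU to a known state: clear the global fault status, reset
 * every stream map entry and context bank, invalidate the TLBs, then
 * configure sCR0 for fault reporting and decide (via the disable_bypass
 * module parameter) whether unmatched streams bypass or fault.
 */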
static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	int i;
	u32 reg, major;

	/* clear global FSR */
	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);

	/*
	 * Reset stream mapping groups: Initial values mark all SMRn as
	 * invalid and all S2CRn as bypass unless overridden.
	 */
	for (i = 0; i < smmu->num_mapping_groups; ++i)
		arm_smmu_write_sme(smmu, i);

	if (smmu->model == ARM_MMU500) {
		/*
		 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
		 * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
		 * bit is only present in MMU-500r2 onwards.
		 */
		reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
		major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
		reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
		if (major >= 2)
			reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
		/*
		 * Allow unmatched Stream IDs to allocate bypass
		 * TLB entries for reduced latency.
		 */
		reg |= ARM_MMU500_ACR_SMTNMB_TLBEN | ARM_MMU500_ACR_S2CRB_TLBEN;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
	}

	/* Make sure all context banks are disabled and clear CB_FSR */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		void __iomem *cb_base = ARM_SMMU_CB(smmu, i);

		arm_smmu_write_context_bank(smmu, i);
		writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
		/*
		 * Disable MMU-500's not-particularly-beneficial next-page
		 * prefetcher for the sake of errata #841119 and #826419.
		 */
		if (smmu->model == ARM_MMU500) {
			reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
			reg &= ~ARM_MMU500_ACTLR_CPRE;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
		}
	}

	/* Invalidate the TLB, just in case */
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);

	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);

	/* Enable fault reporting */
	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);

	/* Disable TLB broadcasting. */
	reg |= (sCR0_VMIDPNE | sCR0_PTM);

	/* Enable client access, handling unmatched streams as appropriate */
	reg &= ~sCR0_CLIENTPD;
	if (disable_bypass)
		reg |= sCR0_USFCFG;
	else
		reg &= ~sCR0_USFCFG;

	/* Disable forced broadcasting */
	reg &= ~sCR0_FB;

	/* Don't upgrade barriers */
	reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);

	if (smmu->features & ARM_SMMU_FEAT_VMID16)
		reg |= sCR0_VMID16EN;

	if (smmu->features & ARM_SMMU_FEAT_EXIDS)
		reg |= sCR0_EXIDENABLE;

	/* Push the button */
	arm_smmu_tlb_sync_global(smmu);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}

static int arm_smmu_id_size_to_bits(int size)
{
	switch (size) {
	case 0:
		return 32;
	case 1:
		return 36;
	case 2:
		return 40;
	case 3:
		return 42;
	case 4:
		return 44;
	case 5:
	default:
		return 48;
	}
}

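/*
 * Interrogate the ID registers to discover what this particular
 * implementation supports: translation stages, stream matching resources,
 * context bank counts, and address/page sizes. Everything probed here
 * feeds the limits advertised through arm_smmu_ops.
 */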
static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned long size;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 id;
	bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
	int i;

	dev_notice(smmu->dev, "probing hardware configuration...\n");
	dev_notice(smmu->dev, "SMMUv%d with:\n",
			smmu->version == ARM_SMMU_V2 ? 2 : 1);

	/* ID0 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);

	/* Restrict available stages based on module parameter */
	if (force_stage == 1)
		id &= ~(ID0_S2TS | ID0_NTS);
	else if (force_stage == 2)
		id &= ~(ID0_S1TS | ID0_NTS);

	if (id & ID0_S1TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
		dev_notice(smmu->dev, "\tstage 1 translation\n");
	}

	if (id & ID0_S2TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
		dev_notice(smmu->dev, "\tstage 2 translation\n");
	}

	if (id & ID0_NTS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
		dev_notice(smmu->dev, "\tnested translation\n");
	}

	if (!(smmu->features &
		(ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
		dev_err(smmu->dev, "\tno translation support!\n");
		return -ENODEV;
	}

	if ((id & ID0_S1TS) &&
		((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
		dev_notice(smmu->dev, "\taddress translation ops\n");
	}

	/*
	 * In order for DMA API calls to work properly, we must defer to what
	 * the FW says about coherency, regardless of what the hardware claims.
	 * Fortunately, this also opens up a workaround for systems where the
	 * ID register value has ended up configured incorrectly.
	 */
	cttw_reg = !!(id & ID0_CTTW);
	if (cttw_fw || cttw_reg)
		dev_notice(smmu->dev, "\t%scoherent table walk\n",
			   cttw_fw ? "" : "non-");
	if (cttw_fw != cttw_reg)
		dev_notice(smmu->dev,
			   "\t(IDR0.CTTW overridden by FW configuration)\n");

	/* Max. number of entries we have for stream matching/indexing */
	if (smmu->version == ARM_SMMU_V2 && id & ID0_EXIDS) {
		smmu->features |= ARM_SMMU_FEAT_EXIDS;
		size = 1 << 16;
	} else {
		size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
	}
	smmu->streamid_mask = size - 1;
	if (id & ID0_SMS) {
		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
		size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
		if (size == 0) {
			dev_err(smmu->dev,
				"stream-matching supported, but no SMRs present!\n");
			return -ENODEV;
		}

		/* Zero-initialised to mark as invalid */
		smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
					  GFP_KERNEL);
		if (!smmu->smrs)
			return -ENOMEM;

		dev_notice(smmu->dev,
			   "\tstream matching with %lu register groups\n", size);
	}
	/* s2cr->type == 0 means translation, so initialise explicitly */
	smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
					 GFP_KERNEL);
	if (!smmu->s2crs)
		return -ENOMEM;
	for (i = 0; i < size; i++)
		smmu->s2crs[i] = s2cr_init_val;

	smmu->num_mapping_groups = size;
	mutex_init(&smmu->stream_map_mutex);
	spin_lock_init(&smmu->global_sync_lock);

	if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
		smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
		if (!(id & ID0_PTFS_NO_AARCH32S))
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
	}

	/* ID1 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
	smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;

	/* Check for size mismatch of SMMU address space from mapped region */
	size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
	size <<= smmu->pgshift;
	if (smmu->cb_base != gr0_base + size)
		dev_warn(smmu->dev,
			"SMMU address space size (0x%lx) differs from mapped region size (0x%tx)!\n",
			size * 2, (smmu->cb_base - gr0_base) * 2);

	smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
	smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
		return -ENODEV;
	}
	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
		   smmu->num_context_banks, smmu->num_s2_context_banks);
	/*
	 * Cavium CN88xx erratum #27704.
	 * Ensure ASID and VMID allocation is unique across all SMMUs in
	 * the system.
	 */
	if (smmu->model == CAVIUM_SMMUV2) {
		smmu->cavium_id_base =
			atomic_add_return(smmu->num_context_banks,
					  &cavium_smmu_context_count);
		smmu->cavium_id_base -= smmu->num_context_banks;
		dev_notice(smmu->dev, "\tenabling workaround for Cavium erratum 27704\n");
	}
	smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks,
				 sizeof(*smmu->cbs), GFP_KERNEL);
	if (!smmu->cbs)
		return -ENOMEM;

	/* ID2 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
	size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
	smmu->ipa_size = size;

	/* The output mask is also applied for bypass */
	size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
	smmu->pa_size = size;

	if (id & ID2_VMID16)
		smmu->features |= ARM_SMMU_FEAT_VMID16;

	/*
	 * What the page table walker can address actually depends on which
	 * descriptor format is in use, but since a) we don't know that yet,
	 * and b) it can vary per context bank, this will have to do...
	 */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	if (smmu->version < ARM_SMMU_V2) {
		smmu->va_size = smmu->ipa_size;
		if (smmu->version == ARM_SMMU_V1_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	} else {
		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
		smmu->va_size = arm_smmu_id_size_to_bits(size);
		if (id & ID2_PTFS_4K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
		if (id & ID2_PTFS_16K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
		if (id & ID2_PTFS_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	}

	/* Now we've corralled the various formats, what'll it do? */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
		smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
	if (smmu->features &
	    (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;

	if (arm_smmu_ops.pgsize_bitmap == -1UL)
		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
	else
		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
		   smmu->pgsize_bitmap);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
			   smmu->va_size, smmu->ipa_size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
		dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
			   smmu->ipa_size, smmu->pa_size);

	return 0;
}

struct arm_smmu_match_data {
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;
};

#define ARM_SMMU_MATCH_DATA(name, ver, imp)	\
static struct arm_smmu_match_data name = { .version = ver, .model = imp }

ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
	{ .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
	{ .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
	{ .compatible = "arm,mmu-401", .data = &arm_mmu401 },
	{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
	{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);

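/*
 * Supporting a further implementation is one ARM_SMMU_MATCH_DATA()
 * instance plus a matching table entry; a purely hypothetical example:
 *
 *	ARM_SMMU_MATCH_DATA(foo_smmuv2, ARM_SMMU_V2, GENERIC_SMMU);
 *	{ .compatible = "foo,smmu-v2", .data = &foo_smmuv2 },
 */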
#ifdef CONFIG_ACPI
static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
{
	int ret = 0;

	switch (model) {
	case ACPI_IORT_SMMU_V1:
	case ACPI_IORT_SMMU_CORELINK_MMU400:
		smmu->version = ARM_SMMU_V1;
		smmu->model = GENERIC_SMMU;
		break;
	case ACPI_IORT_SMMU_CORELINK_MMU401:
		smmu->version = ARM_SMMU_V1_64K;
		smmu->model = GENERIC_SMMU;
		break;
	case ACPI_IORT_SMMU_V2:
		smmu->version = ARM_SMMU_V2;
		smmu->model = GENERIC_SMMU;
		break;
	case ACPI_IORT_SMMU_CORELINK_MMU500:
		smmu->version = ARM_SMMU_V2;
		smmu->model = ARM_MMU500;
		break;
	case ACPI_IORT_SMMU_CAVIUM_THUNDERX:
		smmu->version = ARM_SMMU_V2;
		smmu->model = CAVIUM_SMMUV2;
		break;
	default:
		ret = -ENODEV;
	}

	return ret;
}

static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
				      struct arm_smmu_device *smmu)
{
	struct device *dev = smmu->dev;
	struct acpi_iort_node *node =
		*(struct acpi_iort_node **)dev_get_platdata(dev);
	struct acpi_iort_smmu *iort_smmu;
	int ret;

	/* Retrieve SMMU1/2 specific data */
	iort_smmu = (struct acpi_iort_smmu *)node->node_data;

	ret = acpi_smmu_get_data(iort_smmu->model, smmu);
	if (ret < 0)
		return ret;

	/* Ignore the configuration access interrupt */
	smmu->num_global_irqs = 1;

	if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;

	return 0;
}
#else
static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
					     struct arm_smmu_device *smmu)
{
	return -ENODEV;
}
#endif

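/*
 * DT flavour of the probe path: pick up version/implementation data from
 * the compatible string, then decide once and for all whether this system
 * uses the legacy "mmu-masters" binding or the generic "iommus" one. The
 * two cannot be mixed, hence the mismatch check below.
 */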
static int arm_smmu_device_dt_probe(struct platform_device *pdev,
				    struct arm_smmu_device *smmu)
{
	const struct arm_smmu_match_data *data;
	struct device *dev = &pdev->dev;
	bool legacy_binding;

	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
		dev_err(dev, "missing #global-interrupts property\n");
		return -ENODEV;
	}

	data = of_device_get_match_data(dev);
	smmu->version = data->version;
	smmu->model = data->model;

	parse_driver_options(smmu);

	legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
	if (legacy_binding && !using_generic_binding) {
		if (!using_legacy_binding)
			pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
		using_legacy_binding = true;
	} else if (!legacy_binding && !using_legacy_binding) {
		using_generic_binding = true;
	} else {
		dev_err(dev, "not probing due to mismatched DT properties\n");
		return -ENODEV;
	}

	if (of_dma_is_coherent(dev->of_node))
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;

	return 0;
}

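/*
 * Install arm_smmu_ops as the IOMMU implementation for every bus type we
 * may find masters on. There is no per-instance registration here: the
 * ops are global, and each device finds its own SMMU through its firmware
 * data in add_device().
 */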
static void arm_smmu_bus_init(void)
{
	/* Oh, for a proper bus abstraction */
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
#ifdef CONFIG_ARM_AMBA
	if (!iommu_present(&amba_bustype))
		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
#endif
#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type)) {
		pci_request_acs();
		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
	}
#endif
}

static int arm_smmu_device_probe(struct platform_device *pdev)
{
	struct resource *res;
	resource_size_t ioaddr;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	int num_irqs, i, err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	if (dev->of_node)
		err = arm_smmu_device_dt_probe(pdev, smmu);
	else
		err = arm_smmu_device_acpi_probe(pdev, smmu);

	if (err)
		return err;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ioaddr = res->start;
	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);
	smmu->cb_base = smmu->base + resource_size(res) / 2;

	num_irqs = 0;
	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
		num_irqs++;
		if (num_irqs > smmu->num_global_irqs)
			smmu->num_context_irqs++;
	}

	if (!smmu->num_context_irqs) {
		dev_err(dev, "found %d interrupts but expected at least %d\n",
			num_irqs, smmu->num_global_irqs + 1);
		return -ENODEV;
	}

	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
				  GFP_KERNEL);
	if (!smmu->irqs) {
		dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
		return -ENOMEM;
	}

	for (i = 0; i < num_irqs; ++i) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0) {
			dev_err(dev, "failed to get irq index %d\n", i);
			return -ENODEV;
		}
		smmu->irqs[i] = irq;
	}

	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		return err;

	if (smmu->version == ARM_SMMU_V2 &&
	    smmu->num_context_banks != smmu->num_context_irqs) {
		dev_err(dev,
			"found only %d context interrupt(s) but %d required\n",
			smmu->num_context_irqs, smmu->num_context_banks);
		return -ENODEV;
	}

	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = devm_request_irq(smmu->dev, smmu->irqs[i],
				       arm_smmu_global_fault,
				       IRQF_SHARED,
				       "arm-smmu global fault",
				       smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			return err;
		}
	}

	err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
				     "smmu.%pa", &ioaddr);
	if (err) {
		dev_err(dev, "Failed to register iommu in sysfs\n");
		return err;
	}

	iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
	iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);

	err = iommu_device_register(&smmu->iommu);
	if (err) {
		dev_err(dev, "Failed to register iommu\n");
		return err;
	}

	platform_set_drvdata(pdev, smmu);
	arm_smmu_device_reset(smmu);
	arm_smmu_test_smr_masks(smmu);

	/*
	 * For ACPI and generic DT bindings, an SMMU will be probed before
	 * any device which might need it, so we want the bus ops in place
	 * ready to handle default domain setup as soon as any SMMU exists.
	 */
	if (!using_legacy_binding)
		arm_smmu_bus_init();

	return 0;
}

/*
 * With the legacy DT binding in play, though, we have no guarantees about
 * probe order, but then we're also not doing default domains, so we can
 * delay setting bus ops until we're sure every possible SMMU is ready,
 * and that way ensure that no add_device() calls get missed.
 */
static int arm_smmu_legacy_bus_init(void)
{
	if (using_legacy_binding)
		arm_smmu_bus_init();
	return 0;
}
device_initcall_sync(arm_smmu_legacy_bus_init);

static int arm_smmu_device_remove(struct platform_device *pdev)
{
	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);

	if (!smmu)
		return -ENODEV;

	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_err(&pdev->dev, "removing device with active domains!\n");

	/* Turn the thing off */
	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
	return 0;
}

static void arm_smmu_device_shutdown(struct platform_device *pdev)
{
	arm_smmu_device_remove(pdev);
}

static int __maybe_unused arm_smmu_pm_resume(struct device *dev)
{
	struct arm_smmu_device *smmu = dev_get_drvdata(dev);

	arm_smmu_device_reset(smmu);
	return 0;
}

static SIMPLE_DEV_PM_OPS(arm_smmu_pm_ops, NULL, arm_smmu_pm_resume);

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
		.pm		= &arm_smmu_pm_ops,
	},
	.probe	= arm_smmu_device_probe,
	.remove	= arm_smmu_device_remove,
	.shutdown = arm_smmu_device_shutdown,
};
module_platform_driver(arm_smmu_driver);

IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1");
IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2");
IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400");
IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401");
IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500");
IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2");

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");