/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 *	- Extended Stream ID (16 bit)
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>
#include <linux/fsl/mc.h>

#include "io-pgtable.h"
#include "arm-smmu-regs.h"

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)
#define ARM_MMU500_ACR_S2CRB_TLBEN	(1 << 10)
#define ARM_MMU500_ACR_SMTNMB_TLBEN	(1 << 8)

#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */
#define TLB_SPIN_COUNT			10

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif

/* Translation context bank */
#define ARM_SMMU_CB(smmu, n)	((smmu)->cb_base + ((n) << (smmu)->pgshift))

#define MSI_IOVA_BASE			0x8000000
#define MSI_IOVA_LENGTH			0x100000

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
	QCOM_SMMUV2,
};

struct arm_smmu_s2cr {
	struct iommu_group		*group;
	int				count;
	enum arm_smmu_s2cr_type		type;
	enum arm_smmu_s2cr_privcfg	privcfg;
	u8				cbndx;
};

#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
}

struct arm_smmu_smr {
	u16				mask;
	u16				id;
	bool				valid;
};

struct arm_smmu_cb {
	u64				ttbr[2];
	u32				tcr[2];
	u32				mair[2];
	struct arm_smmu_cfg		*cfg;
};

struct arm_smmu_master_cfg {
	struct arm_smmu_device		*smmu;
	s16				smendx[];
};
#define INVALID_SMENDX			-1
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
#define fwspec_smendx(fw, i) \
	(i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
#define for_each_cfg_sme(fw, i, idx) \
	for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	void __iomem			*cb_base;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
#define ARM_SMMU_FEAT_EXIDS		(1 << 12)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	struct arm_smmu_cb		*cbs;
	atomic_t			irptndx;

	u32				num_mapping_groups;
	u16				streamid_mask;
	u16				smr_mask_mask;
	struct arm_smmu_smr		*smrs;
	struct arm_smmu_s2cr		*s2crs;
	struct mutex			stream_map_mutex;

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;
	struct clk_bulk_data		*clks;
	int				num_clks;

	u32				cavium_id_base; /* Specific to Cavium */

	spinlock_t			global_sync_lock;

	/* IOMMU core code handle */
	struct iommu_device		iommu;
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	union {
		u16			asid;
		u16			vmid;
	};
	u32				cbar;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
	ARM_SMMU_DOMAIN_BYPASS,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	const struct iommu_gather_ops	*tlb_ops;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	bool				non_strict;
	struct mutex			init_mutex; /* Protects smmu pointer */
	spinlock_t			cb_lock; /* Serialises ATS1* ops and TLB syncs */
	struct iommu_domain		domain;
};

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static bool using_legacy_binding, using_generic_binding;

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};

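/*
 * Runtime PM helpers: these collapse to no-ops when runtime PM is not
 * enabled for the SMMU's device, so callers need not check either way.
 */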
static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
{
	if (pm_runtime_enabled(smmu->dev))
		return pm_runtime_get_sync(smmu->dev);

	return 0;
}

static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
{
	if (pm_runtime_enabled(smmu->dev))
		pm_runtime_put(smmu->dev);
}

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
					  arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				   arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}

static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", 0)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}

static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;

static int arm_smmu_register_legacy_master(struct device *dev,
					   struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err;

	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}

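/* Atomically claim the first clear bit in @map between @start and @end. */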
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
				void __iomem *sync, void __iomem *status)
{
	unsigned int spin_cnt, delay;

	writel_relaxed(0, sync);
	for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
		for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
			if (!(readl_relaxed(status) & sTLBGSTATUS_GSACTIVE))
				return;
			cpu_relax();
		}
		udelay(delay);
	}
	dev_err_ratelimited(smmu->dev,
			    "TLB sync timed out -- SMMU may be deadlocked\n");
}

static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
{
	void __iomem *base = ARM_SMMU_GR0(smmu);
	unsigned long flags;

	spin_lock_irqsave(&smmu->global_sync_lock, flags);
	__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_GR0_sTLBGSYNC,
			    base + ARM_SMMU_GR0_sTLBGSTATUS);
	spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
}

static void arm_smmu_tlb_sync_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
	unsigned long flags;

	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
	__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_CB_TLBSYNC,
			    base + ARM_SMMU_CB_TLBSTATUS);
	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
}

static void arm_smmu_tlb_sync_vmid(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;

	arm_smmu_tlb_sync_global(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context_s1(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *base = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);

	/*
	 * NOTE: this is not a relaxed write; it needs to guarantee that PTEs
	 * cleared by the current CPU are visible to the SMMU before the TLBI.
	 */
	writel(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
	arm_smmu_tlb_sync_context(cookie);
}

static void arm_smmu_tlb_inv_context_s2(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *base = ARM_SMMU_GR0(smmu);

	/* NOTE: see above */
	writel(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
	arm_smmu_tlb_sync_global(smmu);
}

static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);

	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	if (stage1) {
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			iova &= ~12UL;
			iova |= cfg->asid;
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)cfg->asid << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else {
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	}
}

/*
 * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
 * almost negligible, but the benefit of getting the first one in as far ahead
 * of the sync as possible is significant, hence we don't just make this a
 * no-op and set .tlb_sync to arm_smmu_tlb_inv_context_s2() as you might think.
 */
static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
					 size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	void __iomem *base = ARM_SMMU_GR0(smmu_domain->smmu);

	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
}

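/*
 * TLB maintenance callbacks handed to io-pgtable: stage 1 and SMMUv2 stage 2
 * sync at context-bank granularity, while SMMUv1 stage 2 invalidates by VMID
 * and falls back to a global sync.
 */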
static const struct iommu_gather_ops arm_smmu_s1_tlb_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s1,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync_context,
};

static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v2 = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync_context,
};

static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v1 = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
	.tlb_add_flush	= arm_smmu_tlb_inv_vmid_nosync,
	.tlb_sync	= arm_smmu_tlb_sync_vmid,
};

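/*
 * Context fault handler: log the faulting address and syndrome, then clear
 * the fault by writing FSR back.
 */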
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	u32 fsr, fsynr;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);

	dev_err_ratelimited(smmu->dev,
	"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n",
			    fsr, iova, fsynr, cfg->cbndx);

	writel(fsr, cb_base + ARM_SMMU_CB_FSR);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

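/*
 * Capture the page-table configuration for a context bank in its software
 * shadow; the registers themselves are written later by
 * arm_smmu_write_context_bank().
 */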
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	cb->cfg = cfg;

	/* TTBCR */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr;
		} else {
			cb->tcr[0] = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
			cb->tcr[1] = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			cb->tcr[1] |= TTBCR2_SEP_UPSTREAM;
			if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
				cb->tcr[1] |= TTBCR2_AS;
		}
	} else {
		cb->tcr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	}

	/* TTBRs */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
			cb->ttbr[1] = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
		} else {
			cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
			cb->ttbr[0] |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
			cb->ttbr[1] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
			cb->ttbr[1] |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
		}
	} else {
		cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr;
			cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr;
		} else {
			cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
			cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		}
	}
}

static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
{
	u32 reg;
	bool stage1;
	struct arm_smmu_cb *cb = &smmu->cbs[idx];
	struct arm_smmu_cfg *cfg = cb->cfg;
	void __iomem *cb_base, *gr1_base;

	cb_base = ARM_SMMU_CB(smmu, idx);

	/* Unassigned context banks only need disabling */
	if (!cfg) {
		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
		return;
	}

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	/* CBA2R */
	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_RW64_64BIT;
		else
			reg = CBA2R_RW64_32BIT;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= cfg->vmid << CBA2R_VMID_SHIFT;

		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(idx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version < ARM_SMMU_V2)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= cfg->vmid << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(idx));

	/*
	 * TTBCR
	 * We must write this before the TTBRs, since it determines the
	 * access behaviour of some fields (in particular, ASID[15:8]).
	 */
	if (stage1 && smmu->version > ARM_SMMU_V1)
		writel_relaxed(cb->tcr[1], cb_base + ARM_SMMU_CB_TTBCR2);
	writel_relaxed(cb->tcr[0], cb_base + ARM_SMMU_CB_TTBCR);

	/* TTBRs */
	if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
		writel_relaxed(cfg->asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
		writel_relaxed(cb->ttbr[0], cb_base + ARM_SMMU_CB_TTBR0);
		writel_relaxed(cb->ttbr[1], cb_base + ARM_SMMU_CB_TTBR1);
	} else {
		writeq_relaxed(cb->ttbr[0], cb_base + ARM_SMMU_CB_TTBR0);
		if (stage1)
			writeq_relaxed(cb->ttbr[1], cb_base + ARM_SMMU_CB_TTBR1);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		writel_relaxed(cb->mair[0], cb_base + ARM_SMMU_CB_S1_MAIR0);
		writel_relaxed(cb->mair[1], cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= SCTLR_E;

	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
		smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N              S1
	 *     S1             S1+S2            S1
	 *     S1               S2             S2
	 *     S1               S1             S1
	 *     N                N              N
	 *     N              S1+S2            S2
	 *     N                S2             S2
	 *     N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
	    !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
	    (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		} else {
			fmt = ARM_V7S;
			ias = min(ias, 32UL);
			oas = min(oas, 32UL);
		}
		smmu_domain->tlb_ops = &arm_smmu_s1_tlb_ops;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		if (smmu->version == ARM_SMMU_V2)
			smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v2;
		else
			smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v1;
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}
	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (ret < 0)
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
		cfg->vmid = cfg->cbndx + 1 + smmu->cavium_id_base;
	else
		cfg->asid = cfg->cbndx + smmu->cavium_id_base;

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= smmu_domain->tlb_ops,
		.iommu_dev	= smmu->dev,
	};

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA;

	if (smmu_domain->non_strict)
		pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1UL << ias) - 1;
	domain->geometry.force_aperture = true;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
			       IRQF_SHARED, "arm-smmu-context-fault", domain);
	if (ret < 0) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	int ret, irq;

	if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
		return;

	ret = arm_smmu_rpm_get(smmu);
	if (ret < 0)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	smmu->cbs[cfg->cbndx].cfg = NULL;
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);

	arm_smmu_rpm_put(smmu);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED &&
	    type != IOMMU_DOMAIN_DMA &&
	    type != IOMMU_DOMAIN_IDENTITY)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
	    iommu_get_dma_cookie(&smmu_domain->domain))) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->cb_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

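/* Program a Stream Match Register from its software-shadowed state. */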
static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_smr *smr = smmu->smrs + idx;
	u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;

	if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
		reg |= SMR_VALID;
	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
}

static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
	u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
		  (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
		  (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;

	if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
	    smmu->smrs[idx].valid)
		reg |= S2CR_EXIDVALID;
	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
}

static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
{
	arm_smmu_write_s2cr(smmu, idx);
	if (smmu->smrs)
		arm_smmu_write_smr(smmu, idx);
}

/*
 * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
 * should be called after sCR0 is written.
 */
static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 smr;

	if (!smmu->smrs)
		return;

	/*
	 * SMR.ID bits may not be preserved if the corresponding MASK
	 * bits are set, so check each one separately. We can reject
	 * masters later if they try to claim IDs outside these masks.
	 */
	smr = smmu->streamid_mask << SMR_ID_SHIFT;
	writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
	smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
	smmu->streamid_mask = smr >> SMR_ID_SHIFT;

	smr = smmu->streamid_mask << SMR_MASK_SHIFT;
	writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
	smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
	smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
}

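/*
 * Find a suitable stream map entry for the given ID/mask: reuse an existing
 * SMR that fully covers it, otherwise claim a free one, and refuse any
 * partial overlap that could alias another stream.
 */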
static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
{
	struct arm_smmu_smr *smrs = smmu->smrs;
	int i, free_idx = -ENOSPC;

	/* Stream indexing is blissfully easy */
	if (!smrs)
		return id;

	/* Validating SMRs is... less so */
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		if (!smrs[i].valid) {
			/*
			 * Note the first free entry we come across, which
			 * we'll claim in the end if nothing else matches.
			 */
			if (free_idx < 0)
				free_idx = i;
			continue;
		}
		/*
		 * If the new entry is _entirely_ matched by an existing entry,
		 * then reuse that, with the guarantee that there also cannot
		 * be any subsequent conflicting entries. In normal use we'd
		 * expect simply identical entries for this case, but there's
		 * no harm in accommodating the generalisation.
		 */
		if ((mask & smrs[i].mask) == mask &&
		    !((id ^ smrs[i].id) & ~smrs[i].mask))
			return i;
		/*
		 * If the new entry has any other overlap with an existing one,
		 * though, then there always exists at least one stream ID
		 * which would cause a conflict, and we can't allow that risk.
		 */
		if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
			return -EINVAL;
	}

	return free_idx;
}

static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
{
	if (--smmu->s2crs[idx].count)
		return false;

	smmu->s2crs[idx] = s2cr_init_val;
	if (smmu->smrs)
		smmu->smrs[idx].valid = false;

	return true;
}

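/*
 * Allocate and program stream map entries for all of a master's Stream IDs,
 * rolling back any partial allocation on failure.
 */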
static int arm_smmu_master_alloc_smes(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
	struct arm_smmu_device *smmu = cfg->smmu;
	struct arm_smmu_smr *smrs = smmu->smrs;
	struct iommu_group *group;
	int i, idx, ret;

	mutex_lock(&smmu->stream_map_mutex);
	/* Figure out a viable stream map entry allocation */
	for_each_cfg_sme(fwspec, i, idx) {
		u16 sid = fwspec->ids[i];
		u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;

		if (idx != INVALID_SMENDX) {
			ret = -EEXIST;
			goto out_err;
		}

		ret = arm_smmu_find_sme(smmu, sid, mask);
		if (ret < 0)
			goto out_err;

		idx = ret;
		if (smrs && smmu->s2crs[idx].count == 0) {
			smrs[idx].id = sid;
			smrs[idx].mask = mask;
			smrs[idx].valid = true;
		}
		smmu->s2crs[idx].count++;
		cfg->smendx[i] = (s16)idx;
	}

	group = iommu_group_get_for_dev(dev);
	if (!group)
		group = ERR_PTR(-ENOMEM);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto out_err;
	}
	iommu_group_put(group);

	/* It worked! Now, poke the actual hardware */
	for_each_cfg_sme(fwspec, i, idx) {
		arm_smmu_write_sme(smmu, idx);
		smmu->s2crs[idx].group = group;
	}

	mutex_unlock(&smmu->stream_map_mutex);
	return 0;

out_err:
	while (i--) {
		arm_smmu_free_sme(smmu, cfg->smendx[i]);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
	return ret;
}

static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
{
	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
	int i, idx;

	mutex_lock(&smmu->stream_map_mutex);
	for_each_cfg_sme(fwspec, i, idx) {
		if (arm_smmu_free_sme(smmu, idx))
			arm_smmu_write_sme(smmu, idx);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
}

static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct iommu_fwspec *fwspec)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s2cr *s2cr = smmu->s2crs;
	u8 cbndx = smmu_domain->cfg.cbndx;
	enum arm_smmu_s2cr_type type;
	int i, idx;

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
		type = S2CR_TYPE_BYPASS;
	else
		type = S2CR_TYPE_TRANS;

	for_each_cfg_sme(fwspec, i, idx) {
		if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
			continue;

		s2cr[idx].type = type;
		s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
		s2cr[idx].cbndx = cbndx;
		arm_smmu_write_s2cr(smmu, idx);
	}
	return 0;
}

static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret;
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_device *smmu;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (!fwspec || fwspec->ops != &arm_smmu_ops) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/*
	 * FIXME: The arch/arm DMA API code tries to attach devices to its own
	 * domains between of_xlate() and add_device() - we have no way to cope
	 * with that, so until ARM gets converted to rely on groups and default
	 * domains, just say no (but more politely than by dereferencing NULL).
	 * This should be at least a WARN_ON once that's sorted.
	 */
	if (!fwspec->iommu_priv)
		return -ENODEV;

	smmu = fwspec_smmu(fwspec);

1254 ret = arm_smmu_rpm_get(smmu);
1255 if (ret < 0)
1256 return ret;
1257
Will Deacon518f7132014-11-14 17:17:54 +00001258 /* Ensure that the domain is finalised */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001259 ret = arm_smmu_init_domain_context(domain, smmu);
Arnd Bergmann287980e2016-05-27 23:23:25 +02001260 if (ret < 0)
Sricharan Rd4a44f02018-12-04 11:52:10 +05301261 goto rpm_put;
Will Deacon518f7132014-11-14 17:17:54 +00001262
Will Deacon45ae7cf2013-06-24 18:31:25 +01001263 /*
Will Deacon44680ee2014-06-25 11:29:12 +01001264 * Sanity check the domain. We don't support domains across
1265 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01001266 */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001267 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001268 dev_err(dev,
1269 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Robin Murphyadfec2e2016-09-12 17:13:55 +01001270 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
Sricharan Rd4a44f02018-12-04 11:52:10 +05301271 ret = -EINVAL;
1272 goto rpm_put;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001273 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001274
1275 /* Looks ok, so add the device to the domain */
Sricharan Rd4a44f02018-12-04 11:52:10 +05301276 ret = arm_smmu_domain_add_master(smmu_domain, fwspec);
1277
1278rpm_put:
1279 arm_smmu_rpm_put(smmu);
1280 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001281}
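
/*
 * Usage sketch (an assumption about the caller, not code from this file):
 * a device driver wanting its own translation regime typically reaches
 * arm_smmu_attach_dev() through the generic API, e.g. for a device that
 * is alone in its IOMMU group:
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(dev->bus);
 *
 *	if (!dom || iommu_attach_device(dom, dev))
 *		goto use_default_domain;	(hypothetical fallback label)
 *
 * The runtime PM get/put above keeps the SMMU powered while the context
 * bank and stream-to-context registers are programmed for that attach.
 */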
1282
Will Deacon45ae7cf2013-06-24 18:31:25 +01001283static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00001284 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001285{
Robin Murphy523d7422017-06-22 16:53:56 +01001286 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301287 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1288 int ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001289
Will Deacon518f7132014-11-14 17:17:54 +00001290 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001291 return -ENODEV;
1292
Sricharan Rd4a44f02018-12-04 11:52:10 +05301293 arm_smmu_rpm_get(smmu);
1294 ret = ops->map(ops, iova, paddr, size, prot);
1295 arm_smmu_rpm_put(smmu);
1296
1297 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001298}
1299
1300static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
1301 size_t size)
1302{
Robin Murphy523d7422017-06-22 16:53:56 +01001303 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301304 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1305 size_t ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001306
Will Deacon518f7132014-11-14 17:17:54 +00001307 if (!ops)
1308 return 0;
1309
Sricharan Rd4a44f02018-12-04 11:52:10 +05301310 arm_smmu_rpm_get(smmu);
1311 ret = ops->unmap(ops, iova, size);
1312 arm_smmu_rpm_put(smmu);
1313
1314 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001315}
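
/*
 * Minimal sketch of how these two callbacks are driven (sizes and flags
 * are illustrative): a caller creates and tears down a 4K mapping with
 *
 *	ret = iommu_map(domain, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *	...
 *	unmapped = iommu_unmap(domain, iova, SZ_4K);
 *
 * which lands in arm_smmu_map()/arm_smmu_unmap() above; both simply
 * bracket the io-pgtable operation with a runtime PM reference so the
 * SMMU stays powered while its page tables are updated.
 */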
1316
Robin Murphy44f68762018-09-20 17:10:27 +01001317static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
1318{
1319 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301320 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy44f68762018-09-20 17:10:27 +01001321
Sricharan Rd4a44f02018-12-04 11:52:10 +05301322 if (smmu_domain->tlb_ops) {
1323 arm_smmu_rpm_get(smmu);
Robin Murphy44f68762018-09-20 17:10:27 +01001324 smmu_domain->tlb_ops->tlb_flush_all(smmu_domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301325 arm_smmu_rpm_put(smmu);
1326 }
Robin Murphy44f68762018-09-20 17:10:27 +01001327}
1328
Robin Murphy32b12442017-09-28 15:55:01 +01001329static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
1330{
1331 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301332 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy32b12442017-09-28 15:55:01 +01001333
Sricharan Rd4a44f02018-12-04 11:52:10 +05301334 if (smmu_domain->tlb_ops) {
1335 arm_smmu_rpm_get(smmu);
Robin Murphy32b12442017-09-28 15:55:01 +01001336 smmu_domain->tlb_ops->tlb_sync(smmu_domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301337 arm_smmu_rpm_put(smmu);
1338 }
Robin Murphy32b12442017-09-28 15:55:01 +01001339}
1340
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001341static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
1342 dma_addr_t iova)
1343{
Joerg Roedel1d672632015-03-26 13:43:10 +01001344 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001345 struct arm_smmu_device *smmu = smmu_domain->smmu;
1346 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1347	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1348 struct device *dev = smmu->dev;
1349 void __iomem *cb_base;
1350 u32 tmp;
1351 u64 phys;
Robin Murphy523d7422017-06-22 16:53:56 +01001352 unsigned long va, flags;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301353 int ret;
1354
1355 ret = arm_smmu_rpm_get(smmu);
1356 if (ret < 0)
1357 return 0;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001358
Robin Murphy452107c2017-03-30 17:56:30 +01001359 cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001360
Robin Murphy523d7422017-06-22 16:53:56 +01001361 spin_lock_irqsave(&smmu_domain->cb_lock, flags);
Robin Murphy661d9622015-05-27 17:09:34 +01001362 /* ATS1 registers can only be written atomically */
1363 va = iova & ~0xfffUL;
Robin Murphy661d9622015-05-27 17:09:34 +01001364 if (smmu->version == ARM_SMMU_V2)
Robin Murphyf9a05f02016-04-13 18:13:01 +01001365 smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
1366 else /* Register is only 32-bit in v1 */
Robin Murphy661d9622015-05-27 17:09:34 +01001367 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001368
1369 if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
1370 !(tmp & ATSR_ACTIVE), 5, 50)) {
Robin Murphy523d7422017-06-22 16:53:56 +01001371 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001372 dev_err(dev,
Fabio Estevam077124c2015-08-18 17:12:24 +01001373 "iova to phys timed out on %pad. Falling back to software table walk.\n",
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001374 &iova);
		arm_smmu_rpm_put(smmu);
1375		return ops->iova_to_phys(ops, iova);
1376 }
1377
Robin Murphyf9a05f02016-04-13 18:13:01 +01001378 phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
Robin Murphy523d7422017-06-22 16:53:56 +01001379 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001380 if (phys & CB_PAR_F) {
1381 dev_err(dev, "translation fault!\n");
1382 dev_err(dev, "PAR = 0x%llx\n", phys);
		arm_smmu_rpm_put(smmu);
1383		return 0;
1384 }
1385
Sricharan Rd4a44f02018-12-04 11:52:10 +05301386 arm_smmu_rpm_put(smmu);
1387
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001388 return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
1389}
1390
Will Deacon45ae7cf2013-06-24 18:31:25 +01001391static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001392 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001393{
Joerg Roedel1d672632015-03-26 13:43:10 +01001394 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Robin Murphy523d7422017-06-22 16:53:56 +01001395 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001396
Sunil Gouthambdf95922017-04-25 15:27:52 +05301397 if (domain->type == IOMMU_DOMAIN_IDENTITY)
1398 return iova;
1399
Will Deacon518f7132014-11-14 17:17:54 +00001400 if (!ops)
Will Deacona44a97912013-11-07 18:47:50 +00001401 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001402
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001403 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
Robin Murphy523d7422017-06-22 16:53:56 +01001404 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
1405 return arm_smmu_iova_to_phys_hard(domain, iova);
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001406
Robin Murphy523d7422017-06-22 16:53:56 +01001407 return ops->iova_to_phys(ops, iova);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001408}
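
/*
 * Example (illustrative): a caller can ask what an IOVA currently
 * translates to with
 *
 *	phys_addr_t pa = iommu_iova_to_phys(domain, iova);
 *
 * For stage-1 domains on hardware advertising ARM_SMMU_FEAT_TRANS_OPS
 * this takes the ATS1PR register path above; otherwise it falls back to
 * a software walk of the io-pgtable.
 */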
1409
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001410static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001411{
Will Deacond0948942014-06-24 17:30:10 +01001412 switch (cap) {
1413 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001414 /*
1415 * Return true here as the SMMU can always send out coherent
1416 * requests.
1417 */
1418 return true;
Antonios Motakis0029a8d2014-10-13 14:06:18 +01001419 case IOMMU_CAP_NOEXEC:
1420 return true;
Will Deacond0948942014-06-24 17:30:10 +01001421 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001422 return false;
Will Deacond0948942014-06-24 17:30:10 +01001423 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001424}
Will Deacon45ae7cf2013-06-24 18:31:25 +01001425
Robin Murphy021bb842016-09-14 15:26:46 +01001426static int arm_smmu_match_node(struct device *dev, void *data)
1427{
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001428 return dev->fwnode == data;
Robin Murphy021bb842016-09-14 15:26:46 +01001429}
1430
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001431static
1432struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
Robin Murphy021bb842016-09-14 15:26:46 +01001433{
1434 struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001435 fwnode, arm_smmu_match_node);
Robin Murphy021bb842016-09-14 15:26:46 +01001436 put_device(dev);
1437 return dev ? dev_get_drvdata(dev) : NULL;
1438}
1439
Will Deacon03edb222015-01-19 14:27:33 +00001440static int arm_smmu_add_device(struct device *dev)
1441{
Robin Murphyadfec2e2016-09-12 17:13:55 +01001442 struct arm_smmu_device *smmu;
Robin Murphyf80cd882016-09-14 15:21:39 +01001443 struct arm_smmu_master_cfg *cfg;
Robin Murphy021bb842016-09-14 15:26:46 +01001444 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Robin Murphyf80cd882016-09-14 15:21:39 +01001445 int i, ret;
1446
Robin Murphy021bb842016-09-14 15:26:46 +01001447 if (using_legacy_binding) {
1448 ret = arm_smmu_register_legacy_master(dev, &smmu);
Artem Savkova7990c62017-08-08 12:26:02 +02001449
1450 /*
1451		 * If dev->iommu_fwspec is initially NULL, arm_smmu_register_legacy_master()
1452 * will allocate/initialise a new one. Thus we need to update fwspec for
1453 * later use.
1454 */
1455 fwspec = dev->iommu_fwspec;
Robin Murphy021bb842016-09-14 15:26:46 +01001456 if (ret)
1457 goto out_free;
Robin Murphy3c117b52016-11-02 17:31:32 +00001458 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001459 smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
Robin Murphy021bb842016-09-14 15:26:46 +01001460 } else {
1461 return -ENODEV;
1462 }
Robin Murphyf80cd882016-09-14 15:21:39 +01001463
1464 ret = -EINVAL;
Robin Murphyadfec2e2016-09-12 17:13:55 +01001465 for (i = 0; i < fwspec->num_ids; i++) {
1466 u16 sid = fwspec->ids[i];
Robin Murphy021bb842016-09-14 15:26:46 +01001467 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
Robin Murphyf80cd882016-09-14 15:21:39 +01001468
Robin Murphyadfec2e2016-09-12 17:13:55 +01001469 if (sid & ~smmu->streamid_mask) {
Robin Murphyf80cd882016-09-14 15:21:39 +01001470 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
Robin Murphy021bb842016-09-14 15:26:46 +01001471 sid, smmu->streamid_mask);
1472 goto out_free;
1473 }
1474 if (mask & ~smmu->smr_mask_mask) {
1475 dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
Peng Fan6323f472017-04-21 17:03:36 +08001476 mask, smmu->smr_mask_mask);
Robin Murphyf80cd882016-09-14 15:21:39 +01001477 goto out_free;
1478 }
Robin Murphyf80cd882016-09-14 15:21:39 +01001479 }
Will Deacon03edb222015-01-19 14:27:33 +00001480
Robin Murphyadfec2e2016-09-12 17:13:55 +01001481 ret = -ENOMEM;
1482 cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
1483 GFP_KERNEL);
1484 if (!cfg)
1485 goto out_free;
1486
1487 cfg->smmu = smmu;
1488 fwspec->iommu_priv = cfg;
1489 while (i--)
1490 cfg->smendx[i] = INVALID_SMENDX;
1491
Sricharan Rd4a44f02018-12-04 11:52:10 +05301492 ret = arm_smmu_rpm_get(smmu);
1493 if (ret < 0)
1494 goto out_cfg_free;
1495
Robin Murphy588888a2016-09-12 17:13:54 +01001496 ret = arm_smmu_master_alloc_smes(dev);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301497 arm_smmu_rpm_put(smmu);
1498
Robin Murphyadfec2e2016-09-12 17:13:55 +01001499 if (ret)
Vivek Gautamc54451a2017-07-06 15:07:00 +05301500 goto out_cfg_free;
Robin Murphyadfec2e2016-09-12 17:13:55 +01001501
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001502 iommu_device_link(&smmu->iommu, dev);
1503
Sricharan R655e3642018-12-04 11:52:11 +05301504 device_link_add(dev, smmu->dev,
1505 DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER);
1506
Robin Murphyadfec2e2016-09-12 17:13:55 +01001507 return 0;
Robin Murphyf80cd882016-09-14 15:21:39 +01001508
Vivek Gautamc54451a2017-07-06 15:07:00 +05301509out_cfg_free:
1510 kfree(cfg);
Robin Murphyf80cd882016-09-14 15:21:39 +01001511out_free:
Robin Murphyadfec2e2016-09-12 17:13:55 +01001512 iommu_fwspec_free(dev);
Robin Murphyf80cd882016-09-14 15:21:39 +01001513 return ret;
Will Deacon03edb222015-01-19 14:27:33 +00001514}
1515
Will Deacon45ae7cf2013-06-24 18:31:25 +01001516static void arm_smmu_remove_device(struct device *dev)
1517{
Robin Murphyadfec2e2016-09-12 17:13:55 +01001518 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001519 struct arm_smmu_master_cfg *cfg;
1520 struct arm_smmu_device *smmu;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301521 int ret;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001522
Robin Murphyadfec2e2016-09-12 17:13:55 +01001523 if (!fwspec || fwspec->ops != &arm_smmu_ops)
Robin Murphyf80cd882016-09-14 15:21:39 +01001524 return;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001525
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001526 cfg = fwspec->iommu_priv;
1527 smmu = cfg->smmu;
1528
Sricharan Rd4a44f02018-12-04 11:52:10 +05301529 ret = arm_smmu_rpm_get(smmu);
1530 if (ret < 0)
1531 return;
1532
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001533 iommu_device_unlink(&smmu->iommu, dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001534 arm_smmu_master_free_smes(fwspec);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301535
1536 arm_smmu_rpm_put(smmu);
1537
Antonios Motakis5fc63a72013-10-18 16:08:29 +01001538 iommu_group_remove_device(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001539 kfree(fwspec->iommu_priv);
1540 iommu_fwspec_free(dev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001541}
1542
Joerg Roedelaf659932015-10-21 23:51:41 +02001543static struct iommu_group *arm_smmu_device_group(struct device *dev)
1544{
Robin Murphyadfec2e2016-09-12 17:13:55 +01001545 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
1546 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
Robin Murphy588888a2016-09-12 17:13:54 +01001547 struct iommu_group *group = NULL;
1548 int i, idx;
1549
Robin Murphyadfec2e2016-09-12 17:13:55 +01001550 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy588888a2016-09-12 17:13:54 +01001551 if (group && smmu->s2crs[idx].group &&
1552 group != smmu->s2crs[idx].group)
1553 return ERR_PTR(-EINVAL);
1554
1555 group = smmu->s2crs[idx].group;
1556 }
1557
1558 if (group)
Robin Murphye1b44cb2016-11-11 17:59:22 +00001559 return iommu_group_ref_get(group);
Joerg Roedelaf659932015-10-21 23:51:41 +02001560
1561 if (dev_is_pci(dev))
1562 group = pci_device_group(dev);
Nipun Guptaeab03e22018-09-10 19:19:18 +05301563 else if (dev_is_fsl_mc(dev))
1564 group = fsl_mc_device_group(dev);
Joerg Roedelaf659932015-10-21 23:51:41 +02001565 else
1566 group = generic_device_group(dev);
1567
Joerg Roedelaf659932015-10-21 23:51:41 +02001568 return group;
1569}
1570
Will Deaconc752ce42014-06-25 22:46:31 +01001571static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
1572 enum iommu_attr attr, void *data)
1573{
Joerg Roedel1d672632015-03-26 13:43:10 +01001574 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001575
Robin Murphy44f68762018-09-20 17:10:27 +01001576	switch (domain->type) {
1577 case IOMMU_DOMAIN_UNMANAGED:
1578 switch (attr) {
1579 case DOMAIN_ATTR_NESTING:
1580 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
1581 return 0;
1582 default:
1583 return -ENODEV;
1584 }
1585 break;
1586 case IOMMU_DOMAIN_DMA:
1587 switch (attr) {
1588 case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
1589 *(int *)data = smmu_domain->non_strict;
1590 return 0;
1591 default:
1592 return -ENODEV;
1593 }
1594 break;
Will Deaconc752ce42014-06-25 22:46:31 +01001595 default:
Robin Murphy44f68762018-09-20 17:10:27 +01001596 return -EINVAL;
Will Deaconc752ce42014-06-25 22:46:31 +01001597 }
1598}
1599
1600static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
1601 enum iommu_attr attr, void *data)
1602{
Will Deacon518f7132014-11-14 17:17:54 +00001603 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01001604 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001605
Will Deacon518f7132014-11-14 17:17:54 +00001606 mutex_lock(&smmu_domain->init_mutex);
1607
Robin Murphy44f68762018-09-20 17:10:27 +01001608	switch (domain->type) {
1609 case IOMMU_DOMAIN_UNMANAGED:
1610 switch (attr) {
1611 case DOMAIN_ATTR_NESTING:
1612 if (smmu_domain->smmu) {
1613 ret = -EPERM;
1614 goto out_unlock;
1615 }
1616
1617 if (*(int *)data)
1618 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1619 else
1620 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1621 break;
1622 default:
1623 ret = -ENODEV;
Will Deacon518f7132014-11-14 17:17:54 +00001624 }
Robin Murphy44f68762018-09-20 17:10:27 +01001625 break;
1626 case IOMMU_DOMAIN_DMA:
1627 switch (attr) {
1628 case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
1629 smmu_domain->non_strict = *(int *)data;
1630 break;
1631 default:
1632 ret = -ENODEV;
1633 }
Will Deacon518f7132014-11-14 17:17:54 +00001634 break;
Will Deaconc752ce42014-06-25 22:46:31 +01001635 default:
Robin Murphy44f68762018-09-20 17:10:27 +01001636 ret = -EINVAL;
Will Deaconc752ce42014-06-25 22:46:31 +01001637 }
Will Deacon518f7132014-11-14 17:17:54 +00001638out_unlock:
1639 mutex_unlock(&smmu_domain->init_mutex);
1640 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01001641}
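
/*
 * Sketch of the expected calling order (an assumption about users such as
 * VFIO, not code from this driver): nesting must be requested on an
 * unmanaged domain before the first attach,
 *
 *	int nesting = 1;
 *
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_NESTING, &nesting);
 *	iommu_attach_device(domain, dev);
 *
 * since arm_smmu_domain_set_attr() returns -EPERM once smmu_domain->smmu
 * has been set by a successful attach.
 */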
1642
Robin Murphy021bb842016-09-14 15:26:46 +01001643static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
1644{
Robin Murphy56fbf602017-03-31 12:03:33 +01001645 u32 mask, fwid = 0;
Robin Murphy021bb842016-09-14 15:26:46 +01001646
1647 if (args->args_count > 0)
1648 fwid |= (u16)args->args[0];
1649
1650 if (args->args_count > 1)
1651 fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
Robin Murphy56fbf602017-03-31 12:03:33 +01001652 else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
1653 fwid |= (u16)mask << SMR_MASK_SHIFT;
Robin Murphy021bb842016-09-14 15:26:46 +01001654
1655 return iommu_fwspec_add_ids(dev, &fwid, 1);
1656}
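
/*
 * Example (DT values are illustrative): with the generic binding
 *
 *	iommus = <&smmu 0x400 0x3f>;
 *
 * args[0] = 0x400 and args[1] = 0x3f are packed here into a single
 * fwspec ID of (0x3f << SMR_MASK_SHIFT) | 0x400, which add_device() and
 * arm_smmu_master_alloc_smes() later split back into the SMR id and mask.
 */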
1657
Eric Augerf3ebee82017-01-19 20:57:55 +00001658static void arm_smmu_get_resv_regions(struct device *dev,
1659 struct list_head *head)
1660{
1661 struct iommu_resv_region *region;
1662 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
1663
1664 region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
Robin Murphy9d3a4de2017-03-16 17:00:16 +00001665 prot, IOMMU_RESV_SW_MSI);
Eric Augerf3ebee82017-01-19 20:57:55 +00001666 if (!region)
1667 return;
1668
1669 list_add_tail(&region->list, head);
Robin Murphy273df962017-03-16 17:00:19 +00001670
1671 iommu_dma_get_resv_regions(dev, head);
Eric Augerf3ebee82017-01-19 20:57:55 +00001672}
1673
1674static void arm_smmu_put_resv_regions(struct device *dev,
1675 struct list_head *head)
1676{
1677 struct iommu_resv_region *entry, *next;
1678
1679 list_for_each_entry_safe(entry, next, head, list)
1680 kfree(entry);
1681}
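
/*
 * Sketch of the consumer side (assumed usage, not from this file): the
 * core API drives the two helpers above via
 *
 *	LIST_HEAD(head);
 *
 *	iommu_get_resv_regions(dev, &head);
 *	iommu_put_resv_regions(dev, &head);
 *
 * and between those calls the list holds at least the IOMMU_RESV_SW_MSI
 * window at MSI_IOVA_BASE, plus whatever iommu_dma_get_resv_regions()
 * added (e.g. PCI bridge windows).
 */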
1682
Will Deacon518f7132014-11-14 17:17:54 +00001683static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01001684 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01001685 .domain_alloc = arm_smmu_domain_alloc,
1686 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01001687 .attach_dev = arm_smmu_attach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01001688 .map = arm_smmu_map,
1689 .unmap = arm_smmu_unmap,
Robin Murphy44f68762018-09-20 17:10:27 +01001690 .flush_iotlb_all = arm_smmu_flush_iotlb_all,
Robin Murphy32b12442017-09-28 15:55:01 +01001691 .iotlb_sync = arm_smmu_iotlb_sync,
Will Deaconc752ce42014-06-25 22:46:31 +01001692 .iova_to_phys = arm_smmu_iova_to_phys,
1693 .add_device = arm_smmu_add_device,
1694 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02001695 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01001696 .domain_get_attr = arm_smmu_domain_get_attr,
1697 .domain_set_attr = arm_smmu_domain_set_attr,
Robin Murphy021bb842016-09-14 15:26:46 +01001698 .of_xlate = arm_smmu_of_xlate,
Eric Augerf3ebee82017-01-19 20:57:55 +00001699 .get_resv_regions = arm_smmu_get_resv_regions,
1700 .put_resv_regions = arm_smmu_put_resv_regions,
Will Deacon518f7132014-11-14 17:17:54 +00001701 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001702};
1703
1704static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
1705{
1706 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001707 int i;
Peng Fan3ca37122016-05-03 21:50:30 +08001708 u32 reg, major;
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001709
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001710 /* clear global FSR */
1711 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
1712 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001713
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001714 /*
1715 * Reset stream mapping groups: Initial values mark all SMRn as
1716 * invalid and all S2CRn as bypass unless overridden.
1717 */
Robin Murphy8e8b2032016-09-12 17:13:50 +01001718 for (i = 0; i < smmu->num_mapping_groups; ++i)
1719 arm_smmu_write_sme(smmu, i);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001720
Nipun Gupta6eb18d42016-11-04 15:25:23 +05301721 if (smmu->model == ARM_MMU500) {
1722 /*
1723 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
1724 * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
1725 * bit is only present in MMU-500r2 onwards.
1726 */
1727 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
1728 major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
Peng Fan3ca37122016-05-03 21:50:30 +08001729 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
Nipun Gupta6eb18d42016-11-04 15:25:23 +05301730 if (major >= 2)
1731 reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
1732 /*
1733 * Allow unmatched Stream IDs to allocate bypass
1734 * TLB entries for reduced latency.
1735 */
Feng Kan74f55d32017-10-11 15:08:39 -07001736 reg |= ARM_MMU500_ACR_SMTNMB_TLBEN | ARM_MMU500_ACR_S2CRB_TLBEN;
Peng Fan3ca37122016-05-03 21:50:30 +08001737 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
1738 }
1739
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001740 /* Make sure all context banks are disabled and clear CB_FSR */
1741 for (i = 0; i < smmu->num_context_banks; ++i) {
Robin Murphy90df3732017-08-08 14:56:14 +01001742 void __iomem *cb_base = ARM_SMMU_CB(smmu, i);
1743
1744 arm_smmu_write_context_bank(smmu, i);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001745 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01001746 /*
1747 * Disable MMU-500's not-particularly-beneficial next-page
1748 * prefetcher for the sake of errata #841119 and #826419.
1749 */
1750 if (smmu->model == ARM_MMU500) {
1751 reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
1752 reg &= ~ARM_MMU500_ACTLR_CPRE;
1753 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
1754 }
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001755 }
Will Deacon1463fe42013-07-31 19:21:27 +01001756
Will Deacon45ae7cf2013-06-24 18:31:25 +01001757 /* Invalidate the TLB, just in case */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001758 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
1759 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
1760
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001761 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001762
Will Deacon45ae7cf2013-06-24 18:31:25 +01001763 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001764 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001765
1766 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001767 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001768
Robin Murphy25a1c962016-02-10 14:25:33 +00001769 /* Enable client access, handling unmatched streams as appropriate */
1770 reg &= ~sCR0_CLIENTPD;
1771 if (disable_bypass)
1772 reg |= sCR0_USFCFG;
1773 else
1774 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001775
1776 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001777 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001778
1779 /* Don't upgrade barriers */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001780 reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001781
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001782 if (smmu->features & ARM_SMMU_FEAT_VMID16)
1783 reg |= sCR0_VMID16EN;
1784
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001785 if (smmu->features & ARM_SMMU_FEAT_EXIDS)
1786 reg |= sCR0_EXIDENABLE;
1787
Will Deacon45ae7cf2013-06-24 18:31:25 +01001788 /* Push the button */
Robin Murphy11febfc2017-03-30 17:56:31 +01001789 arm_smmu_tlb_sync_global(smmu);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001790 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001791}
1792
1793static int arm_smmu_id_size_to_bits(int size)
1794{
1795 switch (size) {
1796 case 0:
1797 return 32;
1798 case 1:
1799 return 36;
1800 case 2:
1801 return 40;
1802 case 3:
1803 return 42;
1804 case 4:
1805 return 44;
1806 case 5:
1807 default:
1808 return 48;
1809 }
1810}
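
/*
 * For example, an ID register size field of 2 (as read from ID2.IAS or
 * ID2.OAS below) reports a 40-bit address size, while 5 and any larger,
 * reserved encoding both map to the architectural maximum of 48 bits.
 */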
1811
1812static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1813{
1814 unsigned long size;
1815 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1816 u32 id;
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001817 bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001818 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001819
1820 dev_notice(smmu->dev, "probing hardware configuration...\n");
Robin Murphyb7862e32016-04-13 18:13:03 +01001821 dev_notice(smmu->dev, "SMMUv%d with:\n",
1822 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001823
1824 /* ID0 */
1825 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01001826
1827 /* Restrict available stages based on module parameter */
1828 if (force_stage == 1)
1829 id &= ~(ID0_S2TS | ID0_NTS);
1830 else if (force_stage == 2)
1831 id &= ~(ID0_S1TS | ID0_NTS);
1832
Will Deacon45ae7cf2013-06-24 18:31:25 +01001833 if (id & ID0_S1TS) {
1834 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
1835 dev_notice(smmu->dev, "\tstage 1 translation\n");
1836 }
1837
1838 if (id & ID0_S2TS) {
1839 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
1840 dev_notice(smmu->dev, "\tstage 2 translation\n");
1841 }
1842
1843 if (id & ID0_NTS) {
1844 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
1845 dev_notice(smmu->dev, "\tnested translation\n");
1846 }
1847
1848 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01001849 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001850 dev_err(smmu->dev, "\tno translation support!\n");
1851 return -ENODEV;
1852 }
1853
Robin Murphyb7862e32016-04-13 18:13:03 +01001854 if ((id & ID0_S1TS) &&
1855 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001856 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
1857 dev_notice(smmu->dev, "\taddress translation ops\n");
1858 }
1859
Robin Murphybae2c2d2015-07-29 19:46:05 +01001860 /*
1861 * In order for DMA API calls to work properly, we must defer to what
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001862 * the FW says about coherency, regardless of what the hardware claims.
Robin Murphybae2c2d2015-07-29 19:46:05 +01001863 * Fortunately, this also opens up a workaround for systems where the
1864 * ID register value has ended up configured incorrectly.
1865 */
Robin Murphybae2c2d2015-07-29 19:46:05 +01001866 cttw_reg = !!(id & ID0_CTTW);
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001867 if (cttw_fw || cttw_reg)
Robin Murphybae2c2d2015-07-29 19:46:05 +01001868 dev_notice(smmu->dev, "\t%scoherent table walk\n",
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001869 cttw_fw ? "" : "non-");
1870 if (cttw_fw != cttw_reg)
Robin Murphybae2c2d2015-07-29 19:46:05 +01001871 dev_notice(smmu->dev,
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001872 "\t(IDR0.CTTW overridden by FW configuration)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01001873
Robin Murphy21174242016-09-12 17:13:48 +01001874 /* Max. number of entries we have for stream matching/indexing */
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001875 if (smmu->version == ARM_SMMU_V2 && id & ID0_EXIDS) {
1876 smmu->features |= ARM_SMMU_FEAT_EXIDS;
1877 size = 1 << 16;
1878 } else {
1879 size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
1880 }
Robin Murphy21174242016-09-12 17:13:48 +01001881 smmu->streamid_mask = size - 1;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001882 if (id & ID0_SMS) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001883 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
Robin Murphy21174242016-09-12 17:13:48 +01001884 size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
1885 if (size == 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001886 dev_err(smmu->dev,
1887 "stream-matching supported, but no SMRs present!\n");
1888 return -ENODEV;
1889 }
1890
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001891 /* Zero-initialised to mark as invalid */
1892 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
1893 GFP_KERNEL);
1894 if (!smmu->smrs)
1895 return -ENOMEM;
1896
Will Deacon45ae7cf2013-06-24 18:31:25 +01001897 dev_notice(smmu->dev,
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001898			   "\tstream matching with %lu register groups\n", size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001899 }
Robin Murphy8e8b2032016-09-12 17:13:50 +01001900 /* s2cr->type == 0 means translation, so initialise explicitly */
1901 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
1902 GFP_KERNEL);
1903 if (!smmu->s2crs)
1904 return -ENOMEM;
1905 for (i = 0; i < size; i++)
1906 smmu->s2crs[i] = s2cr_init_val;
1907
Robin Murphy21174242016-09-12 17:13:48 +01001908 smmu->num_mapping_groups = size;
Robin Murphy588888a2016-09-12 17:13:54 +01001909 mutex_init(&smmu->stream_map_mutex);
Will Deacon8e517e72017-07-06 15:55:48 +01001910 spin_lock_init(&smmu->global_sync_lock);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001911
Robin Murphy7602b872016-04-28 17:12:09 +01001912 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
1913 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
1914 if (!(id & ID0_PTFS_NO_AARCH32S))
1915 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
1916 }
1917
Will Deacon45ae7cf2013-06-24 18:31:25 +01001918 /* ID1 */
1919 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01001920 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001921
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01001922 /* Check for size mismatch of SMMU address space from mapped region */
Will Deacon518f7132014-11-14 17:17:54 +00001923 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
Robin Murphy452107c2017-03-30 17:56:30 +01001924 size <<= smmu->pgshift;
1925 if (smmu->cb_base != gr0_base + size)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001926 dev_warn(smmu->dev,
Robin Murphy452107c2017-03-30 17:56:30 +01001927 "SMMU address space size (0x%lx) differs from mapped region size (0x%tx)!\n",
1928 size * 2, (smmu->cb_base - gr0_base) * 2);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001929
Will Deacon518f7132014-11-14 17:17:54 +00001930 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001931 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
1932 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
1933 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
1934 return -ENODEV;
1935 }
1936 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
1937 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphye086d912016-04-13 18:12:58 +01001938 /*
1939 * Cavium CN88xx erratum #27704.
1940 * Ensure ASID and VMID allocation is unique across all SMMUs in
1941 * the system.
1942 */
1943 if (smmu->model == CAVIUM_SMMUV2) {
1944 smmu->cavium_id_base =
1945 atomic_add_return(smmu->num_context_banks,
1946 &cavium_smmu_context_count);
1947 smmu->cavium_id_base -= smmu->num_context_banks;
Robert Richter53c35dce2017-03-13 11:39:01 +01001948 dev_notice(smmu->dev, "\tenabling workaround for Cavium erratum 27704\n");
Robin Murphye086d912016-04-13 18:12:58 +01001949 }
Robin Murphy90df3732017-08-08 14:56:14 +01001950 smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks,
1951 sizeof(*smmu->cbs), GFP_KERNEL);
1952 if (!smmu->cbs)
1953 return -ENOMEM;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001954
1955 /* ID2 */
1956 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
1957 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00001958 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001959
Will Deacon518f7132014-11-14 17:17:54 +00001960 /* The output mask is also applied for bypass */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001961 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00001962 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001963
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001964 if (id & ID2_VMID16)
1965 smmu->features |= ARM_SMMU_FEAT_VMID16;
1966
Robin Murphyf1d84542015-03-04 16:41:05 +00001967 /*
1968 * What the page table walker can address actually depends on which
1969 * descriptor format is in use, but since a) we don't know that yet,
1970 * and b) it can vary per context bank, this will have to do...
1971 */
1972 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
1973 dev_warn(smmu->dev,
1974 "failed to set DMA mask for table walker\n");
1975
Robin Murphyb7862e32016-04-13 18:13:03 +01001976 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00001977 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01001978 if (smmu->version == ARM_SMMU_V1_64K)
1979 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001980 } else {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001981 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
Will Deacon518f7132014-11-14 17:17:54 +00001982 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00001983 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01001984 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00001985 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01001986 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00001987 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01001988 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001989 }
1990
Robin Murphy7602b872016-04-28 17:12:09 +01001991 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01001992 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01001993 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01001994 if (smmu->features &
1995 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01001996 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01001997 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01001998 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01001999 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01002000 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01002001
Robin Murphyd5466352016-05-09 17:20:09 +01002002 if (arm_smmu_ops.pgsize_bitmap == -1UL)
2003 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
2004 else
2005 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
2006 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
2007 smmu->pgsize_bitmap);
2008
Will Deacon518f7132014-11-14 17:17:54 +00002009
Will Deacon28d60072014-09-01 16:24:48 +01002010 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
2011 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
Will Deacon518f7132014-11-14 17:17:54 +00002012 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01002013
2014 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
2015 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
Will Deacon518f7132014-11-14 17:17:54 +00002016 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01002017
Will Deacon45ae7cf2013-06-24 18:31:25 +01002018 return 0;
2019}
2020
Robin Murphy67b65a32016-04-13 18:12:57 +01002021struct arm_smmu_match_data {
2022 enum arm_smmu_arch_version version;
2023 enum arm_smmu_implementation model;
2024};
2025
2026#define ARM_SMMU_MATCH_DATA(name, ver, imp) \
Sricharan R96a299d2018-12-04 11:52:09 +05302027static const struct arm_smmu_match_data name = { .version = ver, .model = imp }
Robin Murphy67b65a32016-04-13 18:12:57 +01002028
2029ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
2030ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
Robin Murphyb7862e32016-04-13 18:13:03 +01002031ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01002032ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
Robin Murphye086d912016-04-13 18:12:58 +01002033ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
Vivek Gautam89cddc52018-12-04 11:52:13 +05302034ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2);
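
/*
 * For instance, ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500)
 * above expands to
 *
 *	static const struct arm_smmu_match_data arm_mmu500 =
 *		{ .version = ARM_SMMU_V2, .model = ARM_MMU500 };
 *
 * so each compatible string in the table below carries a static
 * version/model pair for arm_smmu_device_dt_probe() to copy.
 */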
Robin Murphy67b65a32016-04-13 18:12:57 +01002035
Joerg Roedel09b52692014-10-02 12:24:45 +02002036static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01002037 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
2038 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
2039 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01002040 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01002041 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01002042 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Vivek Gautam89cddc52018-12-04 11:52:13 +05302043 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
Robin Murphy09360402014-08-28 17:51:59 +01002044 { },
2045};
2046MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
2047
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002048#ifdef CONFIG_ACPI
2049static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
2050{
2051 int ret = 0;
2052
2053 switch (model) {
2054 case ACPI_IORT_SMMU_V1:
2055 case ACPI_IORT_SMMU_CORELINK_MMU400:
2056 smmu->version = ARM_SMMU_V1;
2057 smmu->model = GENERIC_SMMU;
2058 break;
Robin Murphy84c24372017-06-19 16:41:56 +01002059 case ACPI_IORT_SMMU_CORELINK_MMU401:
2060 smmu->version = ARM_SMMU_V1_64K;
2061 smmu->model = GENERIC_SMMU;
2062 break;
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002063 case ACPI_IORT_SMMU_V2:
2064 smmu->version = ARM_SMMU_V2;
2065 smmu->model = GENERIC_SMMU;
2066 break;
2067 case ACPI_IORT_SMMU_CORELINK_MMU500:
2068 smmu->version = ARM_SMMU_V2;
2069 smmu->model = ARM_MMU500;
2070 break;
Robin Murphy84c24372017-06-19 16:41:56 +01002071 case ACPI_IORT_SMMU_CAVIUM_THUNDERX:
2072 smmu->version = ARM_SMMU_V2;
2073 smmu->model = CAVIUM_SMMUV2;
2074 break;
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002075 default:
2076 ret = -ENODEV;
2077 }
2078
2079 return ret;
2080}
2081
2082static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
2083 struct arm_smmu_device *smmu)
2084{
2085 struct device *dev = smmu->dev;
2086 struct acpi_iort_node *node =
2087 *(struct acpi_iort_node **)dev_get_platdata(dev);
2088 struct acpi_iort_smmu *iort_smmu;
2089 int ret;
2090
2091 /* Retrieve SMMU1/2 specific data */
2092 iort_smmu = (struct acpi_iort_smmu *)node->node_data;
2093
2094 ret = acpi_smmu_get_data(iort_smmu->model, smmu);
2095 if (ret < 0)
2096 return ret;
2097
2098 /* Ignore the configuration access interrupt */
2099 smmu->num_global_irqs = 1;
2100
2101 if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
2102 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
2103
2104 return 0;
2105}
2106#else
2107static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
2108 struct arm_smmu_device *smmu)
2109{
2110 return -ENODEV;
2111}
2112#endif
2113
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002114static int arm_smmu_device_dt_probe(struct platform_device *pdev,
2115 struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002116{
Robin Murphy67b65a32016-04-13 18:12:57 +01002117 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002118 struct device *dev = &pdev->dev;
Robin Murphy021bb842016-09-14 15:26:46 +01002119 bool legacy_binding;
2120
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002121 if (of_property_read_u32(dev->of_node, "#global-interrupts",
2122 &smmu->num_global_irqs)) {
2123 dev_err(dev, "missing #global-interrupts property\n");
2124 return -ENODEV;
2125 }
2126
2127 data = of_device_get_match_data(dev);
2128 smmu->version = data->version;
2129 smmu->model = data->model;
2130
2131 parse_driver_options(smmu);
2132
Robin Murphy021bb842016-09-14 15:26:46 +01002133 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
2134 if (legacy_binding && !using_generic_binding) {
2135 if (!using_legacy_binding)
2136 pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
2137 using_legacy_binding = true;
2138 } else if (!legacy_binding && !using_legacy_binding) {
2139 using_generic_binding = true;
2140 } else {
2141 dev_err(dev, "not probing due to mismatched DT properties\n");
2142 return -ENODEV;
2143 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002144
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002145 if (of_dma_is_coherent(dev->of_node))
2146 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
2147
2148 return 0;
2149}
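
/*
 * Sketch of a matching DT node (addresses, sizes and interrupt specifiers
 * are illustrative, not taken from any real board):
 *
 *	smmu: iommu@d0000000 {
 *		compatible = "arm,mmu-500";
 *		reg = <0xd0000000 0x10000>;
 *		#global-interrupts = <1>;
 *		#iommu-cells = <1>;
 *		interrupts = <0 65 4>, <0 66 4>, <0 67 4>;
 *	};
 *
 * Only #global-interrupts is parsed directly here; the compatible string
 * selects the match data above, and the register and interrupt resources
 * are picked up later in arm_smmu_device_probe().
 */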
2150
Robin Murphyf6810c12017-04-10 16:51:05 +05302151static void arm_smmu_bus_init(void)
2152{
2153 /* Oh, for a proper bus abstraction */
2154 if (!iommu_present(&platform_bus_type))
2155 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
2156#ifdef CONFIG_ARM_AMBA
2157 if (!iommu_present(&amba_bustype))
2158 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
2159#endif
2160#ifdef CONFIG_PCI
2161 if (!iommu_present(&pci_bus_type)) {
2162 pci_request_acs();
2163 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
2164 }
2165#endif
Nipun Guptaeab03e22018-09-10 19:19:18 +05302166#ifdef CONFIG_FSL_MC_BUS
2167 if (!iommu_present(&fsl_mc_bus_type))
2168 bus_set_iommu(&fsl_mc_bus_type, &arm_smmu_ops);
2169#endif
Robin Murphyf6810c12017-04-10 16:51:05 +05302170}
2171
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002172static int arm_smmu_device_probe(struct platform_device *pdev)
2173{
2174 struct resource *res;
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002175 resource_size_t ioaddr;
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002176 struct arm_smmu_device *smmu;
2177 struct device *dev = &pdev->dev;
2178 int num_irqs, i, err;
2179
Will Deacon45ae7cf2013-06-24 18:31:25 +01002180 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
2181 if (!smmu) {
2182 dev_err(dev, "failed to allocate arm_smmu_device\n");
2183 return -ENOMEM;
2184 }
2185 smmu->dev = dev;
2186
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002187 if (dev->of_node)
2188 err = arm_smmu_device_dt_probe(pdev, smmu);
2189 else
2190 err = arm_smmu_device_acpi_probe(pdev, smmu);
2191
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002192 if (err)
2193 return err;
Robin Murphy09360402014-08-28 17:51:59 +01002194
Will Deacon45ae7cf2013-06-24 18:31:25 +01002195 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002196 ioaddr = res->start;
Julia Lawall8a7f4312013-08-19 12:20:37 +01002197 smmu->base = devm_ioremap_resource(dev, res);
2198 if (IS_ERR(smmu->base))
2199 return PTR_ERR(smmu->base);
Robin Murphy452107c2017-03-30 17:56:30 +01002200 smmu->cb_base = smmu->base + resource_size(res) / 2;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002201
Will Deacon45ae7cf2013-06-24 18:31:25 +01002202 num_irqs = 0;
2203 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
2204 num_irqs++;
2205 if (num_irqs > smmu->num_global_irqs)
2206 smmu->num_context_irqs++;
2207 }
2208
Andreas Herrmann44a08de2013-10-01 13:39:07 +01002209 if (!smmu->num_context_irqs) {
2210 dev_err(dev, "found %d interrupts but expected at least %d\n",
2211 num_irqs, smmu->num_global_irqs + 1);
2212 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002213 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002214
Kees Cooka86854d2018-06-12 14:07:58 -07002215 smmu->irqs = devm_kcalloc(dev, num_irqs, sizeof(*smmu->irqs),
Will Deacon45ae7cf2013-06-24 18:31:25 +01002216 GFP_KERNEL);
2217 if (!smmu->irqs) {
2218 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
2219 return -ENOMEM;
2220 }
2221
2222 for (i = 0; i < num_irqs; ++i) {
2223 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07002224
Will Deacon45ae7cf2013-06-24 18:31:25 +01002225 if (irq < 0) {
2226 dev_err(dev, "failed to get irq index %d\n", i);
2227 return -ENODEV;
2228 }
2229 smmu->irqs[i] = irq;
2230 }
2231
Sricharan R96a299d2018-12-04 11:52:09 +05302232 err = devm_clk_bulk_get_all(dev, &smmu->clks);
2233 if (err < 0) {
2234 dev_err(dev, "failed to get clocks %d\n", err);
2235 return err;
2236 }
2237 smmu->num_clks = err;
2238
2239 err = clk_bulk_prepare_enable(smmu->num_clks, smmu->clks);
2240 if (err)
2241 return err;
2242
Olav Haugan3c8766d2014-08-22 17:12:32 -07002243 err = arm_smmu_device_cfg_probe(smmu);
2244 if (err)
2245 return err;
2246
Vivek Gautamd1e20222018-07-19 23:23:56 +05302247 if (smmu->version == ARM_SMMU_V2) {
2248 if (smmu->num_context_banks > smmu->num_context_irqs) {
2249 dev_err(dev,
2250 "found only %d context irq(s) but %d required\n",
2251 smmu->num_context_irqs, smmu->num_context_banks);
2252 return -ENODEV;
2253 }
2254
2255 /* Ignore superfluous interrupts */
2256 smmu->num_context_irqs = smmu->num_context_banks;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002257 }
2258
Will Deacon45ae7cf2013-06-24 18:31:25 +01002259 for (i = 0; i < smmu->num_global_irqs; ++i) {
Peng Fanbee14002016-07-04 17:38:22 +08002260 err = devm_request_irq(smmu->dev, smmu->irqs[i],
2261 arm_smmu_global_fault,
2262 IRQF_SHARED,
2263 "arm-smmu global fault",
2264 smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002265 if (err) {
2266 dev_err(dev, "failed to request global IRQ %d (%u)\n",
2267 i, smmu->irqs[i]);
Robin Murphyf80cd882016-09-14 15:21:39 +01002268 return err;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002269 }
2270 }
2271
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002272 err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
2273 "smmu.%pa", &ioaddr);
2274 if (err) {
2275 dev_err(dev, "Failed to register iommu in sysfs\n");
2276 return err;
2277 }
2278
2279 iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
2280 iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
2281
2282 err = iommu_device_register(&smmu->iommu);
2283 if (err) {
2284 dev_err(dev, "Failed to register iommu\n");
2285 return err;
2286 }
2287
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002288 platform_set_drvdata(pdev, smmu);
Will Deaconfd90cec2013-08-21 13:56:34 +01002289 arm_smmu_device_reset(smmu);
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03002290 arm_smmu_test_smr_masks(smmu);
Robin Murphy021bb842016-09-14 15:26:46 +01002291
Robin Murphyf6810c12017-04-10 16:51:05 +05302292 /*
Sricharan Rd4a44f02018-12-04 11:52:10 +05302293 * We want to avoid touching dev->power.lock in fastpaths unless
2294 * it's really going to do something useful - pm_runtime_enabled()
2295 * can serve as an ideal proxy for that decision. So, conditionally
2296 * enable pm_runtime.
2297 */
2298 if (dev->pm_domain) {
2299 pm_runtime_set_active(dev);
2300 pm_runtime_enable(dev);
2301 }
2302
2303 /*
Robin Murphyf6810c12017-04-10 16:51:05 +05302304 * For ACPI and generic DT bindings, an SMMU will be probed before
2305 * any device which might need it, so we want the bus ops in place
2306 * ready to handle default domain setup as soon as any SMMU exists.
2307 */
2308 if (!using_legacy_binding)
2309 arm_smmu_bus_init();
2310
Will Deacon45ae7cf2013-06-24 18:31:25 +01002311 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002312}
2313
Robin Murphyf6810c12017-04-10 16:51:05 +05302314/*
2315 * With the legacy DT binding in play, though, we have no guarantees about
2316 * probe order, but then we're also not doing default domains, so we can
2317 * delay setting bus ops until we're sure every possible SMMU is ready,
2318 * and that way ensure that no add_device() calls get missed.
2319 */
2320static int arm_smmu_legacy_bus_init(void)
2321{
2322 if (using_legacy_binding)
2323 arm_smmu_bus_init();
2324 return 0;
2325}
2326device_initcall_sync(arm_smmu_legacy_bus_init);
2327
Will Deacon45ae7cf2013-06-24 18:31:25 +01002328static int arm_smmu_device_remove(struct platform_device *pdev)
2329{
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002330 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002331
2332 if (!smmu)
2333 return -ENODEV;
2334
Will Deaconecfadb62013-07-31 19:21:28 +01002335 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002336 dev_err(&pdev->dev, "removing device with active domains!\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01002337
Sricharan Rd4a44f02018-12-04 11:52:10 +05302338 arm_smmu_rpm_get(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002339 /* Turn the thing off */
Mitchel Humpherys29073202014-07-08 09:52:18 -07002340 writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Sricharan Rd4a44f02018-12-04 11:52:10 +05302341 arm_smmu_rpm_put(smmu);
Sricharan R96a299d2018-12-04 11:52:09 +05302342
Sricharan Rd4a44f02018-12-04 11:52:10 +05302343 if (pm_runtime_enabled(smmu->dev))
2344 pm_runtime_force_suspend(smmu->dev);
2345 else
2346 clk_bulk_disable(smmu->num_clks, smmu->clks);
2347
2348 clk_bulk_unprepare(smmu->num_clks, smmu->clks);
Sricharan R96a299d2018-12-04 11:52:09 +05302349
Will Deacon45ae7cf2013-06-24 18:31:25 +01002350 return 0;
2351}
2352
Nate Watterson7aa86192017-06-29 18:18:15 -04002353static void arm_smmu_device_shutdown(struct platform_device *pdev)
2354{
2355 arm_smmu_device_remove(pdev);
2356}
2357
Sricharan R96a299d2018-12-04 11:52:09 +05302358static int __maybe_unused arm_smmu_runtime_resume(struct device *dev)
Robin Murphya2d866f2017-08-08 14:56:15 +01002359{
2360 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
Sricharan R96a299d2018-12-04 11:52:09 +05302361 int ret;
2362
2363 ret = clk_bulk_enable(smmu->num_clks, smmu->clks);
2364 if (ret)
2365 return ret;
Robin Murphya2d866f2017-08-08 14:56:15 +01002366
2367 arm_smmu_device_reset(smmu);
Sricharan R96a299d2018-12-04 11:52:09 +05302368
Robin Murphya2d866f2017-08-08 14:56:15 +01002369 return 0;
2370}
2371
Sricharan R96a299d2018-12-04 11:52:09 +05302372static int __maybe_unused arm_smmu_runtime_suspend(struct device *dev)
2373{
2374 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
2375
2376 clk_bulk_disable(smmu->num_clks, smmu->clks);
2377
2378 return 0;
2379}
2380
2381static int __maybe_unused arm_smmu_pm_resume(struct device *dev)
2382{
2383 if (pm_runtime_suspended(dev))
2384 return 0;
2385
2386 return arm_smmu_runtime_resume(dev);
2387}
2388
2389static int __maybe_unused arm_smmu_pm_suspend(struct device *dev)
2390{
2391 if (pm_runtime_suspended(dev))
2392 return 0;
2393
2394 return arm_smmu_runtime_suspend(dev);
2395}
2396
2397static const struct dev_pm_ops arm_smmu_pm_ops = {
2398 SET_SYSTEM_SLEEP_PM_OPS(arm_smmu_pm_suspend, arm_smmu_pm_resume)
2399 SET_RUNTIME_PM_OPS(arm_smmu_runtime_suspend,
2400 arm_smmu_runtime_resume, NULL)
2401};
Robin Murphya2d866f2017-08-08 14:56:15 +01002402
Will Deacon45ae7cf2013-06-24 18:31:25 +01002403static struct platform_driver arm_smmu_driver = {
2404 .driver = {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002405 .name = "arm-smmu",
2406 .of_match_table = of_match_ptr(arm_smmu_of_match),
Robin Murphya2d866f2017-08-08 14:56:15 +01002407 .pm = &arm_smmu_pm_ops,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002408 },
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002409 .probe = arm_smmu_device_probe,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002410 .remove = arm_smmu_device_remove,
Nate Watterson7aa86192017-06-29 18:18:15 -04002411 .shutdown = arm_smmu_device_shutdown,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002412};
Robin Murphyf6810c12017-04-10 16:51:05 +05302413module_platform_driver(arm_smmu_driver);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002414
Will Deacon45ae7cf2013-06-24 18:31:25 +01002415MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
2416MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
2417MODULE_LICENSE("GPL v2");