/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 *	- Extended Stream ID (16 bit)
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>
#include <linux/fsl/mc.h>

#include "io-pgtable.h"
#include "arm-smmu-regs.h"

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)
#define ARM_MMU500_ACR_S2CRB_TLBEN	(1 << 10)
#define ARM_MMU500_ACR_SMTNMB_TLBEN	(1 << 8)

#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */
#define TLB_SPIN_COUNT			10

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif

/* Translation context bank */
#define ARM_SMMU_CB(smmu, n)	((smmu)->cb_base + ((n) << (smmu)->pgshift))

#define MSI_IOVA_BASE			0x8000000
#define MSI_IOVA_LENGTH			0x100000

static int force_stage;
/*
 * not really modular, but the easiest way to keep compat with existing
 * bootargs behaviour is to continue using module_param() here.
 */
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
	QCOM_SMMUV2,
};

struct arm_smmu_s2cr {
	struct iommu_group		*group;
	int				count;
	enum arm_smmu_s2cr_type		type;
	enum arm_smmu_s2cr_privcfg	privcfg;
	u8				cbndx;
};

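/*
 * Default S2CR state: fault incoming transactions if "disable_bypass" is
 * set, otherwise let them bypass translation. Released stream map entries
 * are reset to this value.
 */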
#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
}

struct arm_smmu_smr {
	u16				mask;
	u16				id;
	bool				valid;
};

struct arm_smmu_cb {
	u64				ttbr[2];
	u32				tcr[2];
	u32				mair[2];
	struct arm_smmu_cfg		*cfg;
};

struct arm_smmu_master_cfg {
	struct arm_smmu_device		*smmu;
	s16				smendx[];
};
#define INVALID_SMENDX			-1
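/*
 * Helpers to reach the arm_smmu_master_cfg hanging off a device's
 * iommu_fwspec, and to iterate over the stream map entry index recorded
 * for each of its stream IDs.
 */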
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
#define fwspec_smendx(fw, i) \
	(i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
#define for_each_cfg_sme(fw, i, idx) \
	for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	void __iomem			*cb_base;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
#define ARM_SMMU_FEAT_EXIDS		(1 << 12)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	struct arm_smmu_cb		*cbs;
	atomic_t			irptndx;

	u32				num_mapping_groups;
	u16				streamid_mask;
	u16				smr_mask_mask;
	struct arm_smmu_smr		*smrs;
	struct arm_smmu_s2cr		*s2crs;
	struct mutex			stream_map_mutex;

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;
	struct clk_bulk_data		*clks;
	int				num_clks;

	u32				cavium_id_base; /* Specific to Cavium */

	spinlock_t			global_sync_lock;

	/* IOMMU core code handle */
	struct iommu_device		iommu;
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	union {
		u16			asid;
		u16			vmid;
	};
	u32				cbar;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
	ARM_SMMU_DOMAIN_BYPASS,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	const struct iommu_gather_ops	*tlb_ops;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	bool				non_strict;
	struct mutex			init_mutex; /* Protects smmu pointer */
	spinlock_t			cb_lock; /* Serialises ATS1* ops and TLB syncs */
	struct iommu_domain		domain;
};

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static bool using_legacy_binding, using_generic_binding;

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};

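/*
 * Runtime PM wrappers: these are no-ops (returning 0) when runtime PM is
 * not enabled for the SMMU device.
 */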
static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
{
	if (pm_runtime_enabled(smmu->dev))
		return pm_runtime_get_sync(smmu->dev);

	return 0;
}

static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
{
	if (pm_runtime_enabled(smmu->dev))
		pm_runtime_put(smmu->dev);
}

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				   arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}

static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", 0)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}

static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;

static int arm_smmu_register_legacy_master(struct device *dev,
					    struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err;

	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}

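/*
 * Atomically claim the first free index in [start, end) of @map, or return
 * -ENOSPC if no bit is free. The test_and_set_bit() loop retries if another
 * caller grabs the same bit first.
 */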
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
				void __iomem *sync, void __iomem *status)
{
	unsigned int spin_cnt, delay;

	writel_relaxed(0, sync);
	for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
		for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
			if (!(readl_relaxed(status) & sTLBGSTATUS_GSACTIVE))
				return;
			cpu_relax();
		}
		udelay(delay);
	}
	dev_err_ratelimited(smmu->dev,
			    "TLB sync timed out -- SMMU may be deadlocked\n");
}

static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
{
	void __iomem *base = ARM_SMMU_GR0(smmu);
	unsigned long flags;

	spin_lock_irqsave(&smmu->global_sync_lock, flags);
	__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_GR0_sTLBGSYNC,
			    base + ARM_SMMU_GR0_sTLBGSTATUS);
	spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
}

static void arm_smmu_tlb_sync_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
	unsigned long flags;

	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
	__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_CB_TLBSYNC,
			    base + ARM_SMMU_CB_TLBSTATUS);
	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
}

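/*
 * Contexts that invalidate by VMID in the global register space pair their
 * invalidations with the global TLBSYNC rather than a context-bank sync.
 */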
static void arm_smmu_tlb_sync_vmid(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;

	arm_smmu_tlb_sync_global(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context_s1(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *base = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);

	/*
	 * NOTE: this is not a relaxed write; it needs to guarantee that PTEs
	 * cleared by the current CPU are visible to the SMMU before the TLBI.
	 */
	writel(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
	arm_smmu_tlb_sync_context(cookie);
}

static void arm_smmu_tlb_inv_context_s2(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *base = ARM_SMMU_GR0(smmu);

	/* NOTE: see above */
	writel(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
	arm_smmu_tlb_sync_global(smmu);
}

static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);

	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	if (stage1) {
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			iova &= ~12UL;
			iova |= cfg->asid;
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)cfg->asid << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else {
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	}
}

/*
 * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
 * almost negligible, but the benefit of getting the first one in as far ahead
 * of the sync as possible is significant, hence we don't just make this a
 * no-op and set .tlb_sync to arm_smmu_inv_context_s2() as you might think.
 */
static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
					 size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	void __iomem *base = ARM_SMMU_GR0(smmu_domain->smmu);

	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
}

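/*
 * TLB maintenance ops: stage 1 invalidates by ASID/VA within the context
 * bank with a per-context sync; stage 2 on SMMUv2 invalidates by IPA with a
 * per-context sync; stage 2 on older implementations falls back to VMID-wide
 * invalidation and the global sync.
 */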
static const struct iommu_gather_ops arm_smmu_s1_tlb_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s1,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync_context,
};

static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v2 = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync_context,
};

static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v1 = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
	.tlb_add_flush	= arm_smmu_tlb_inv_vmid_nosync,
	.tlb_sync	= arm_smmu_tlb_sync_vmid,
};

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	u32 fsr, fsynr;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);

	dev_err_ratelimited(smmu->dev,
	"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n",
			    fsr, iova, fsynr, cfg->cbndx);

	writel(fsr, cb_base + ARM_SMMU_CB_FSR);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

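/*
 * arm_smmu_init_context_bank() only fills in the software arm_smmu_cb state
 * from the io-pgtable configuration; arm_smmu_write_context_bank() below
 * pushes that state (plus the CBAR/SCTLR setup) out to the hardware.
 */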
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	cb->cfg = cfg;

	/* TTBCR */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr;
		} else {
			cb->tcr[0] = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
			cb->tcr[1] = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			cb->tcr[1] |= TTBCR2_SEP_UPSTREAM;
			if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
				cb->tcr[1] |= TTBCR2_AS;
		}
	} else {
		cb->tcr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	}

	/* TTBRs */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
			cb->ttbr[1] = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
		} else {
			cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
			cb->ttbr[0] |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
			cb->ttbr[1] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
			cb->ttbr[1] |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
		}
	} else {
		cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr;
			cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr;
		} else {
			cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
			cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		}
	}
}

static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
{
	u32 reg;
	bool stage1;
	struct arm_smmu_cb *cb = &smmu->cbs[idx];
	struct arm_smmu_cfg *cfg = cb->cfg;
	void __iomem *cb_base, *gr1_base;

	cb_base = ARM_SMMU_CB(smmu, idx);

	/* Unassigned context banks only need disabling */
	if (!cfg) {
		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
		return;
	}

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	/* CBA2R */
	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_RW64_64BIT;
		else
			reg = CBA2R_RW64_32BIT;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= cfg->vmid << CBA2R_VMID_SHIFT;

		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(idx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version < ARM_SMMU_V2)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= cfg->vmid << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(idx));

	/*
	 * TTBCR
	 * We must write this before the TTBRs, since it determines the
	 * access behaviour of some fields (in particular, ASID[15:8]).
	 */
	if (stage1 && smmu->version > ARM_SMMU_V1)
		writel_relaxed(cb->tcr[1], cb_base + ARM_SMMU_CB_TTBCR2);
	writel_relaxed(cb->tcr[0], cb_base + ARM_SMMU_CB_TTBCR);

	/* TTBRs */
	if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
		writel_relaxed(cfg->asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
		writel_relaxed(cb->ttbr[0], cb_base + ARM_SMMU_CB_TTBR0);
		writel_relaxed(cb->ttbr[1], cb_base + ARM_SMMU_CB_TTBR1);
	} else {
		writeq_relaxed(cb->ttbr[0], cb_base + ARM_SMMU_CB_TTBR0);
		if (stage1)
			writeq_relaxed(cb->ttbr[1], cb_base + ARM_SMMU_CB_TTBR1);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		writel_relaxed(cb->mair[0], cb_base + ARM_SMMU_CB_S1_MAIR0);
		writel_relaxed(cb->mair[1], cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= SCTLR_E;

	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
		smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N              S1
	 *     S1             S1+S2            S1
	 *     S1               S2             S2
	 *     S1               S1             S1
	 *     N                N              N
	 *     N              S1+S2            S2
	 *     N                S2             S2
	 *     N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
	    !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
	    (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		} else {
			fmt = ARM_V7S;
			ias = min(ias, 32UL);
			oas = min(oas, 32UL);
		}
		smmu_domain->tlb_ops = &arm_smmu_s1_tlb_ops;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		if (smmu->version == ARM_SMMU_V2)
			smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v2;
		else
			smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v1;
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}
	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (ret < 0)
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
		cfg->vmid = cfg->cbndx + 1 + smmu->cavium_id_base;
	else
		cfg->asid = cfg->cbndx + smmu->cavium_id_base;

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= smmu_domain->tlb_ops,
		.iommu_dev	= smmu->dev,
	};

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA;

	if (smmu_domain->non_strict)
		pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1UL << ias) - 1;
	domain->geometry.force_aperture = true;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
			       IRQF_SHARED, "arm-smmu-context-fault", domain);
	if (ret < 0) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	int ret, irq;

	if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
		return;

	ret = arm_smmu_rpm_get(smmu);
	if (ret < 0)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	smmu->cbs[cfg->cbndx].cfg = NULL;
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);

	arm_smmu_rpm_put(smmu);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED &&
	    type != IOMMU_DOMAIN_DMA &&
	    type != IOMMU_DOMAIN_IDENTITY)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
	    iommu_get_dma_cookie(&smmu_domain->domain))) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->cb_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

1022{
1023 struct arm_smmu_smr *smr = smmu->smrs + idx;
Robin Murphyf80cd882016-09-14 15:21:39 +01001024 u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001025
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001026 if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001027 reg |= SMR_VALID;
1028 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
1029}
1030
Robin Murphy8e8b2032016-09-12 17:13:50 +01001031static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
1032{
1033 struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
1034 u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
1035 (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
1036 (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;
1037
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001038 if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
1039 smmu->smrs[idx].valid)
1040 reg |= S2CR_EXIDVALID;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001041 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
1042}
1043
1044static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
1045{
1046 arm_smmu_write_s2cr(smmu, idx);
1047 if (smmu->smrs)
1048 arm_smmu_write_smr(smmu, idx);
1049}
1050
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001051/*
1052 * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
1053 * should be called after sCR0 is written.
1054 */
1055static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
1056{
1057 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1058 u32 smr;
1059
1060 if (!smmu->smrs)
1061 return;
1062
1063 /*
1064 * SMR.ID bits may not be preserved if the corresponding MASK
1065 * bits are set, so check each one separately. We can reject
1066 * masters later if they try to claim IDs outside these masks.
1067 */
1068 smr = smmu->streamid_mask << SMR_ID_SHIFT;
1069 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
1070 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
1071 smmu->streamid_mask = smr >> SMR_ID_SHIFT;
1072
1073 smr = smmu->streamid_mask << SMR_MASK_SHIFT;
1074 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
1075 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
1076 smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
1077}
1078
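/*
 * Find a stream map entry for the given ID/mask: return the index of an
 * existing entry that entirely covers it, otherwise the first free index,
 * -EINVAL on a partial (conflicting) overlap, or -ENOSPC if the table is
 * full. With stream indexing (no SMRs), the ID itself is the index.
 */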
static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
{
	struct arm_smmu_smr *smrs = smmu->smrs;
	int i, free_idx = -ENOSPC;

	/* Stream indexing is blissfully easy */
	if (!smrs)
		return id;

	/* Validating SMRs is... less so */
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		if (!smrs[i].valid) {
			/*
			 * Note the first free entry we come across, which
			 * we'll claim in the end if nothing else matches.
			 */
			if (free_idx < 0)
				free_idx = i;
			continue;
		}
		/*
		 * If the new entry is _entirely_ matched by an existing entry,
		 * then reuse that, with the guarantee that there also cannot
		 * be any subsequent conflicting entries. In normal use we'd
		 * expect simply identical entries for this case, but there's
		 * no harm in accommodating the generalisation.
		 */
		if ((mask & smrs[i].mask) == mask &&
		    !((id ^ smrs[i].id) & ~smrs[i].mask))
			return i;
		/*
		 * If the new entry has any other overlap with an existing one,
		 * though, then there always exists at least one stream ID
		 * which would cause a conflict, and we can't allow that risk.
		 */
		if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
			return -EINVAL;
	}

	return free_idx;
}

static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
{
	if (--smmu->s2crs[idx].count)
		return false;

	smmu->s2crs[idx] = s2cr_init_val;
	if (smmu->smrs)
		smmu->smrs[idx].valid = false;

	return true;
}

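/*
 * Allocate (or reuse) a stream map entry for every stream ID in the device's
 * fwspec, then program the SMRs/S2CRs and record the owning iommu_group.
 */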
1133static int arm_smmu_master_alloc_smes(struct device *dev)
1134{
Joerg Roedel9b468f72018-11-29 14:01:00 +01001135 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001136 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy588888a2016-09-12 17:13:54 +01001137 struct arm_smmu_device *smmu = cfg->smmu;
1138 struct arm_smmu_smr *smrs = smmu->smrs;
1139 struct iommu_group *group;
1140 int i, idx, ret;
1141
1142 mutex_lock(&smmu->stream_map_mutex);
1143 /* Figure out a viable stream map entry allocation */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001144 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy021bb842016-09-14 15:26:46 +01001145 u16 sid = fwspec->ids[i];
1146 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
1147
Robin Murphy588888a2016-09-12 17:13:54 +01001148 if (idx != INVALID_SMENDX) {
1149 ret = -EEXIST;
1150 goto out_err;
1151 }
1152
Robin Murphy021bb842016-09-14 15:26:46 +01001153 ret = arm_smmu_find_sme(smmu, sid, mask);
Robin Murphy588888a2016-09-12 17:13:54 +01001154 if (ret < 0)
1155 goto out_err;
1156
1157 idx = ret;
1158 if (smrs && smmu->s2crs[idx].count == 0) {
Robin Murphy021bb842016-09-14 15:26:46 +01001159 smrs[idx].id = sid;
1160 smrs[idx].mask = mask;
Robin Murphy588888a2016-09-12 17:13:54 +01001161 smrs[idx].valid = true;
1162 }
1163 smmu->s2crs[idx].count++;
1164 cfg->smendx[i] = (s16)idx;
1165 }
1166
1167 group = iommu_group_get_for_dev(dev);
1168 if (!group)
1169 group = ERR_PTR(-ENOMEM);
1170 if (IS_ERR(group)) {
1171 ret = PTR_ERR(group);
1172 goto out_err;
1173 }
1174 iommu_group_put(group);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001175
Will Deacon45ae7cf2013-06-24 18:31:25 +01001176 /* It worked! Now, poke the actual hardware */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001177 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy588888a2016-09-12 17:13:54 +01001178 arm_smmu_write_sme(smmu, idx);
1179 smmu->s2crs[idx].group = group;
1180 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001181
Robin Murphy588888a2016-09-12 17:13:54 +01001182 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001183 return 0;
1184
Robin Murphy588888a2016-09-12 17:13:54 +01001185out_err:
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001186 while (i--) {
Robin Murphy588888a2016-09-12 17:13:54 +01001187 arm_smmu_free_sme(smmu, cfg->smendx[i]);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001188 cfg->smendx[i] = INVALID_SMENDX;
1189 }
Robin Murphy588888a2016-09-12 17:13:54 +01001190 mutex_unlock(&smmu->stream_map_mutex);
1191 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001192}
1193
Robin Murphyadfec2e2016-09-12 17:13:55 +01001194static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001195{
Robin Murphyadfec2e2016-09-12 17:13:55 +01001196 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
1197 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphyd3097e32016-09-12 17:13:53 +01001198 int i, idx;
Will Deacon43b412b2014-07-15 11:22:24 +01001199
Robin Murphy588888a2016-09-12 17:13:54 +01001200 mutex_lock(&smmu->stream_map_mutex);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001201 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy588888a2016-09-12 17:13:54 +01001202 if (arm_smmu_free_sme(smmu, idx))
1203 arm_smmu_write_sme(smmu, idx);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001204 cfg->smendx[i] = INVALID_SMENDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001205 }
Robin Murphy588888a2016-09-12 17:13:54 +01001206 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001207}
1208
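/*
 * Point every stream mapping entry used by this master at the domain's
 * context bank (or straight at bypass for a bypass domain) and write the
 * updated S2CRs out to the hardware.
 */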
Will Deacon45ae7cf2013-06-24 18:31:25 +01001209static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Robin Murphyadfec2e2016-09-12 17:13:55 +01001210 struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001211{
Will Deacon44680ee2014-06-25 11:29:12 +01001212 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001213 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001214 u8 cbndx = smmu_domain->cfg.cbndx;
Will Deacon61bc6712017-01-06 16:56:03 +00001215 enum arm_smmu_s2cr_type type;
Robin Murphy588888a2016-09-12 17:13:54 +01001216 int i, idx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001217
Will Deacon61bc6712017-01-06 16:56:03 +00001218 if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
1219 type = S2CR_TYPE_BYPASS;
1220 else
1221 type = S2CR_TYPE_TRANS;
1222
Robin Murphyadfec2e2016-09-12 17:13:55 +01001223 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy8e8b2032016-09-12 17:13:50 +01001224 if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
Robin Murphy588888a2016-09-12 17:13:54 +01001225 continue;
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001226
Robin Murphy8e8b2032016-09-12 17:13:50 +01001227 s2cr[idx].type = type;
Sricharan Re1989802017-01-06 18:58:15 +05301228 s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001229 s2cr[idx].cbndx = cbndx;
1230 arm_smmu_write_s2cr(smmu, idx);
Will Deacon43b412b2014-07-15 11:22:24 +01001231 }
Robin Murphy8e8b2032016-09-12 17:13:50 +01001232 return 0;
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001233}
1234
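/*
 * Attach a device to an IOMMU domain: finalise the domain on this SMMU if
 * necessary, check that the device and the domain live on the same SMMU
 * instance, then route the device's stream IDs to the domain's context bank.
 */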
Will Deacon45ae7cf2013-06-24 18:31:25 +01001235static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1236{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001237 int ret;
Joerg Roedel9b468f72018-11-29 14:01:00 +01001238 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001239 struct arm_smmu_device *smmu;
Joerg Roedel1d672632015-03-26 13:43:10 +01001240 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001241
Robin Murphyadfec2e2016-09-12 17:13:55 +01001242 if (!fwspec || fwspec->ops != &arm_smmu_ops) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001243 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
1244 return -ENXIO;
1245 }
1246
Robin Murphyfba4f8e2016-10-17 12:06:21 +01001247 /*
1248 * FIXME: The arch/arm DMA API code tries to attach devices to its own
1249 * domains between of_xlate() and add_device() - we have no way to cope
1250 * with that, so until ARM gets converted to rely on groups and default
1251 * domains, just say no (but more politely than by dereferencing NULL).
1252 * This should be at least a WARN_ON once that's sorted.
1253 */
1254 if (!fwspec->iommu_priv)
1255 return -ENODEV;
1256
Robin Murphyadfec2e2016-09-12 17:13:55 +01001257 smmu = fwspec_smmu(fwspec);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301258
1259 ret = arm_smmu_rpm_get(smmu);
1260 if (ret < 0)
1261 return ret;
1262
Will Deacon518f7132014-11-14 17:17:54 +00001263 /* Ensure that the domain is finalised */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001264 ret = arm_smmu_init_domain_context(domain, smmu);
Arnd Bergmann287980e2016-05-27 23:23:25 +02001265 if (ret < 0)
Sricharan Rd4a44f02018-12-04 11:52:10 +05301266 goto rpm_put;
Will Deacon518f7132014-11-14 17:17:54 +00001267
Will Deacon45ae7cf2013-06-24 18:31:25 +01001268 /*
Will Deacon44680ee2014-06-25 11:29:12 +01001269 * Sanity check the domain. We don't support domains across
1270 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01001271 */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001272 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001273 dev_err(dev,
1274 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Robin Murphyadfec2e2016-09-12 17:13:55 +01001275 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
Sricharan Rd4a44f02018-12-04 11:52:10 +05301276 ret = -EINVAL;
1277 goto rpm_put;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001278 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001279
1280 /* Looks ok, so add the device to the domain */
Sricharan Rd4a44f02018-12-04 11:52:10 +05301281 ret = arm_smmu_domain_add_master(smmu_domain, fwspec);
1282
1283rpm_put:
1284 arm_smmu_rpm_put(smmu);
1285 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001286}
1287
Will Deacon45ae7cf2013-06-24 18:31:25 +01001288static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00001289 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001290{
Robin Murphy523d7422017-06-22 16:53:56 +01001291 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301292 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1293 int ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001294
Will Deacon518f7132014-11-14 17:17:54 +00001295 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001296 return -ENODEV;
1297
Sricharan Rd4a44f02018-12-04 11:52:10 +05301298 arm_smmu_rpm_get(smmu);
1299 ret = ops->map(ops, iova, paddr, size, prot);
1300 arm_smmu_rpm_put(smmu);
1301
1302 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001303}
1304
1305static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
1306 size_t size)
1307{
Robin Murphy523d7422017-06-22 16:53:56 +01001308 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301309 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1310 size_t ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001311
Will Deacon518f7132014-11-14 17:17:54 +00001312 if (!ops)
1313 return 0;
1314
Sricharan Rd4a44f02018-12-04 11:52:10 +05301315 arm_smmu_rpm_get(smmu);
1316 ret = ops->unmap(ops, iova, size);
1317 arm_smmu_rpm_put(smmu);
1318
1319 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001320}
1321
Robin Murphy44f68762018-09-20 17:10:27 +01001322static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
1323{
1324 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301325 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy44f68762018-09-20 17:10:27 +01001326
Sricharan Rd4a44f02018-12-04 11:52:10 +05301327 if (smmu_domain->tlb_ops) {
1328 arm_smmu_rpm_get(smmu);
Robin Murphy44f68762018-09-20 17:10:27 +01001329 smmu_domain->tlb_ops->tlb_flush_all(smmu_domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301330 arm_smmu_rpm_put(smmu);
1331 }
Robin Murphy44f68762018-09-20 17:10:27 +01001332}
1333
Robin Murphy32b12442017-09-28 15:55:01 +01001334static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
1335{
1336 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301337 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy32b12442017-09-28 15:55:01 +01001338
Sricharan Rd4a44f02018-12-04 11:52:10 +05301339 if (smmu_domain->tlb_ops) {
1340 arm_smmu_rpm_get(smmu);
Robin Murphy32b12442017-09-28 15:55:01 +01001341 smmu_domain->tlb_ops->tlb_sync(smmu_domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301342 arm_smmu_rpm_put(smmu);
1343 }
Robin Murphy32b12442017-09-28 15:55:01 +01001344}
1345
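/*
 * Resolve an IOVA using the hardware ATS1PR address translation operation
 * on the context bank rather than walking the page tables in software,
 * falling back to a software table walk if the operation times out.
 */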
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001346static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
1347 dma_addr_t iova)
1348{
Joerg Roedel1d672632015-03-26 13:43:10 +01001349 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001350 struct arm_smmu_device *smmu = smmu_domain->smmu;
1351 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	1352	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1353 struct device *dev = smmu->dev;
1354 void __iomem *cb_base;
1355 u32 tmp;
1356 u64 phys;
Robin Murphy523d7422017-06-22 16:53:56 +01001357 unsigned long va, flags;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301358 int ret;
1359
1360 ret = arm_smmu_rpm_get(smmu);
1361 if (ret < 0)
1362 return 0;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001363
Robin Murphy452107c2017-03-30 17:56:30 +01001364 cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001365
Robin Murphy523d7422017-06-22 16:53:56 +01001366 spin_lock_irqsave(&smmu_domain->cb_lock, flags);
Robin Murphy661d9622015-05-27 17:09:34 +01001367 /* ATS1 registers can only be written atomically */
1368 va = iova & ~0xfffUL;
Robin Murphy661d9622015-05-27 17:09:34 +01001369 if (smmu->version == ARM_SMMU_V2)
Robin Murphyf9a05f02016-04-13 18:13:01 +01001370 smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
1371 else /* Register is only 32-bit in v1 */
Robin Murphy661d9622015-05-27 17:09:34 +01001372 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001373
1374 if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
1375 !(tmp & ATSR_ACTIVE), 5, 50)) {
Robin Murphy523d7422017-06-22 16:53:56 +01001376 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001377 dev_err(dev,
Fabio Estevam077124c2015-08-18 17:12:24 +01001378 "iova to phys timed out on %pad. Falling back to software table walk.\n",
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001379 &iova);
		arm_smmu_rpm_put(smmu);
	1380		return ops->iova_to_phys(ops, iova);
1381 }
1382
Robin Murphyf9a05f02016-04-13 18:13:01 +01001383 phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
Robin Murphy523d7422017-06-22 16:53:56 +01001384 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001385 if (phys & CB_PAR_F) {
1386 dev_err(dev, "translation fault!\n");
1387 dev_err(dev, "PAR = 0x%llx\n", phys);
		arm_smmu_rpm_put(smmu);
	1388		return 0;
1389 }
1390
Sricharan Rd4a44f02018-12-04 11:52:10 +05301391 arm_smmu_rpm_put(smmu);
1392
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001393 return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
1394}
1395
Will Deacon45ae7cf2013-06-24 18:31:25 +01001396static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001397 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001398{
Joerg Roedel1d672632015-03-26 13:43:10 +01001399 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Robin Murphy523d7422017-06-22 16:53:56 +01001400 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001401
Sunil Gouthambdf95922017-04-25 15:27:52 +05301402 if (domain->type == IOMMU_DOMAIN_IDENTITY)
1403 return iova;
1404
Will Deacon518f7132014-11-14 17:17:54 +00001405 if (!ops)
Will Deacona44a97912013-11-07 18:47:50 +00001406 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001407
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001408 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
Robin Murphy523d7422017-06-22 16:53:56 +01001409 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
1410 return arm_smmu_iova_to_phys_hard(domain, iova);
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001411
Robin Murphy523d7422017-06-22 16:53:56 +01001412 return ops->iova_to_phys(ops, iova);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001413}
1414
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001415static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001416{
Will Deacond0948942014-06-24 17:30:10 +01001417 switch (cap) {
1418 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001419 /*
1420 * Return true here as the SMMU can always send out coherent
1421 * requests.
1422 */
1423 return true;
Antonios Motakis0029a8d2014-10-13 14:06:18 +01001424 case IOMMU_CAP_NOEXEC:
1425 return true;
Will Deacond0948942014-06-24 17:30:10 +01001426 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001427 return false;
Will Deacond0948942014-06-24 17:30:10 +01001428 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001429}
Will Deacon45ae7cf2013-06-24 18:31:25 +01001430
Robin Murphy021bb842016-09-14 15:26:46 +01001431static int arm_smmu_match_node(struct device *dev, void *data)
1432{
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001433 return dev->fwnode == data;
Robin Murphy021bb842016-09-14 15:26:46 +01001434}
1435
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001436static
1437struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
Robin Murphy021bb842016-09-14 15:26:46 +01001438{
1439 struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001440 fwnode, arm_smmu_match_node);
Robin Murphy021bb842016-09-14 15:26:46 +01001441 put_device(dev);
1442 return dev ? dev_get_drvdata(dev) : NULL;
1443}
1444
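/*
 * Per-device setup: validate the firmware-provided stream IDs against this
 * SMMU's limits, allocate the per-master configuration and stream mapping
 * entries, and link the device to the SMMU for sysfs and runtime PM.
 */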
Will Deacon03edb222015-01-19 14:27:33 +00001445static int arm_smmu_add_device(struct device *dev)
1446{
Robin Murphyadfec2e2016-09-12 17:13:55 +01001447 struct arm_smmu_device *smmu;
Robin Murphyf80cd882016-09-14 15:21:39 +01001448 struct arm_smmu_master_cfg *cfg;
Joerg Roedel9b468f72018-11-29 14:01:00 +01001449 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphyf80cd882016-09-14 15:21:39 +01001450 int i, ret;
1451
Robin Murphy021bb842016-09-14 15:26:46 +01001452 if (using_legacy_binding) {
1453 ret = arm_smmu_register_legacy_master(dev, &smmu);
Artem Savkova7990c62017-08-08 12:26:02 +02001454
1455 /*
	1456		 * If dev->iommu_fwspec is initially NULL, arm_smmu_register_legacy_master()
1457 * will allocate/initialise a new one. Thus we need to update fwspec for
1458 * later use.
1459 */
Joerg Roedel9b468f72018-11-29 14:01:00 +01001460 fwspec = dev_iommu_fwspec_get(dev);
Robin Murphy021bb842016-09-14 15:26:46 +01001461 if (ret)
1462 goto out_free;
Robin Murphy3c117b52016-11-02 17:31:32 +00001463 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001464 smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
Robin Murphy021bb842016-09-14 15:26:46 +01001465 } else {
1466 return -ENODEV;
1467 }
Robin Murphyf80cd882016-09-14 15:21:39 +01001468
1469 ret = -EINVAL;
Robin Murphyadfec2e2016-09-12 17:13:55 +01001470 for (i = 0; i < fwspec->num_ids; i++) {
1471 u16 sid = fwspec->ids[i];
Robin Murphy021bb842016-09-14 15:26:46 +01001472 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
Robin Murphyf80cd882016-09-14 15:21:39 +01001473
Robin Murphyadfec2e2016-09-12 17:13:55 +01001474 if (sid & ~smmu->streamid_mask) {
Robin Murphyf80cd882016-09-14 15:21:39 +01001475 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
Robin Murphy021bb842016-09-14 15:26:46 +01001476 sid, smmu->streamid_mask);
1477 goto out_free;
1478 }
1479 if (mask & ~smmu->smr_mask_mask) {
1480 dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
Peng Fan6323f472017-04-21 17:03:36 +08001481 mask, smmu->smr_mask_mask);
Robin Murphyf80cd882016-09-14 15:21:39 +01001482 goto out_free;
1483 }
Robin Murphyf80cd882016-09-14 15:21:39 +01001484 }
Will Deacon03edb222015-01-19 14:27:33 +00001485
Robin Murphyadfec2e2016-09-12 17:13:55 +01001486 ret = -ENOMEM;
1487 cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
1488 GFP_KERNEL);
1489 if (!cfg)
1490 goto out_free;
1491
1492 cfg->smmu = smmu;
1493 fwspec->iommu_priv = cfg;
1494 while (i--)
1495 cfg->smendx[i] = INVALID_SMENDX;
1496
Sricharan Rd4a44f02018-12-04 11:52:10 +05301497 ret = arm_smmu_rpm_get(smmu);
1498 if (ret < 0)
1499 goto out_cfg_free;
1500
Robin Murphy588888a2016-09-12 17:13:54 +01001501 ret = arm_smmu_master_alloc_smes(dev);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301502 arm_smmu_rpm_put(smmu);
1503
Robin Murphyadfec2e2016-09-12 17:13:55 +01001504 if (ret)
Vivek Gautamc54451a2017-07-06 15:07:00 +05301505 goto out_cfg_free;
Robin Murphyadfec2e2016-09-12 17:13:55 +01001506
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001507 iommu_device_link(&smmu->iommu, dev);
1508
Sricharan R655e3642018-12-04 11:52:11 +05301509 device_link_add(dev, smmu->dev,
1510 DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER);
1511
Robin Murphyadfec2e2016-09-12 17:13:55 +01001512 return 0;
Robin Murphyf80cd882016-09-14 15:21:39 +01001513
Vivek Gautamc54451a2017-07-06 15:07:00 +05301514out_cfg_free:
1515 kfree(cfg);
Robin Murphyf80cd882016-09-14 15:21:39 +01001516out_free:
Robin Murphyadfec2e2016-09-12 17:13:55 +01001517 iommu_fwspec_free(dev);
Robin Murphyf80cd882016-09-14 15:21:39 +01001518 return ret;
Will Deacon03edb222015-01-19 14:27:33 +00001519}
1520
Will Deacon45ae7cf2013-06-24 18:31:25 +01001521static void arm_smmu_remove_device(struct device *dev)
1522{
Joerg Roedel9b468f72018-11-29 14:01:00 +01001523 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001524 struct arm_smmu_master_cfg *cfg;
1525 struct arm_smmu_device *smmu;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301526 int ret;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001527
Robin Murphyadfec2e2016-09-12 17:13:55 +01001528 if (!fwspec || fwspec->ops != &arm_smmu_ops)
Robin Murphyf80cd882016-09-14 15:21:39 +01001529 return;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001530
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001531 cfg = fwspec->iommu_priv;
1532 smmu = cfg->smmu;
1533
Sricharan Rd4a44f02018-12-04 11:52:10 +05301534 ret = arm_smmu_rpm_get(smmu);
1535 if (ret < 0)
1536 return;
1537
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001538 iommu_device_unlink(&smmu->iommu, dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001539 arm_smmu_master_free_smes(fwspec);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301540
1541 arm_smmu_rpm_put(smmu);
1542
Antonios Motakis5fc63a72013-10-18 16:08:29 +01001543 iommu_group_remove_device(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001544 kfree(fwspec->iommu_priv);
1545 iommu_fwspec_free(dev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001546}
1547
Joerg Roedelaf659932015-10-21 23:51:41 +02001548static struct iommu_group *arm_smmu_device_group(struct device *dev)
1549{
Joerg Roedel9b468f72018-11-29 14:01:00 +01001550 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001551 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
Robin Murphy588888a2016-09-12 17:13:54 +01001552 struct iommu_group *group = NULL;
1553 int i, idx;
1554
Robin Murphyadfec2e2016-09-12 17:13:55 +01001555 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy588888a2016-09-12 17:13:54 +01001556 if (group && smmu->s2crs[idx].group &&
1557 group != smmu->s2crs[idx].group)
1558 return ERR_PTR(-EINVAL);
1559
1560 group = smmu->s2crs[idx].group;
1561 }
1562
1563 if (group)
Robin Murphye1b44cb2016-11-11 17:59:22 +00001564 return iommu_group_ref_get(group);
Joerg Roedelaf659932015-10-21 23:51:41 +02001565
1566 if (dev_is_pci(dev))
1567 group = pci_device_group(dev);
Nipun Guptaeab03e22018-09-10 19:19:18 +05301568 else if (dev_is_fsl_mc(dev))
1569 group = fsl_mc_device_group(dev);
Joerg Roedelaf659932015-10-21 23:51:41 +02001570 else
1571 group = generic_device_group(dev);
1572
Joerg Roedelaf659932015-10-21 23:51:41 +02001573 return group;
1574}
1575
Will Deaconc752ce42014-06-25 22:46:31 +01001576static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
1577 enum iommu_attr attr, void *data)
1578{
Joerg Roedel1d672632015-03-26 13:43:10 +01001579 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001580
Robin Murphy44f68762018-09-20 17:10:27 +01001581 switch(domain->type) {
1582 case IOMMU_DOMAIN_UNMANAGED:
1583 switch (attr) {
1584 case DOMAIN_ATTR_NESTING:
1585 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
1586 return 0;
1587 default:
1588 return -ENODEV;
1589 }
1590 break;
1591 case IOMMU_DOMAIN_DMA:
1592 switch (attr) {
1593 case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
1594 *(int *)data = smmu_domain->non_strict;
1595 return 0;
1596 default:
1597 return -ENODEV;
1598 }
1599 break;
Will Deaconc752ce42014-06-25 22:46:31 +01001600 default:
Robin Murphy44f68762018-09-20 17:10:27 +01001601 return -EINVAL;
Will Deaconc752ce42014-06-25 22:46:31 +01001602 }
1603}
1604
1605static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
1606 enum iommu_attr attr, void *data)
1607{
Will Deacon518f7132014-11-14 17:17:54 +00001608 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01001609 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001610
Will Deacon518f7132014-11-14 17:17:54 +00001611 mutex_lock(&smmu_domain->init_mutex);
1612
Robin Murphy44f68762018-09-20 17:10:27 +01001613 switch(domain->type) {
1614 case IOMMU_DOMAIN_UNMANAGED:
1615 switch (attr) {
1616 case DOMAIN_ATTR_NESTING:
1617 if (smmu_domain->smmu) {
1618 ret = -EPERM;
1619 goto out_unlock;
1620 }
1621
1622 if (*(int *)data)
1623 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1624 else
1625 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1626 break;
1627 default:
1628 ret = -ENODEV;
Will Deacon518f7132014-11-14 17:17:54 +00001629 }
Robin Murphy44f68762018-09-20 17:10:27 +01001630 break;
1631 case IOMMU_DOMAIN_DMA:
1632 switch (attr) {
1633 case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
1634 smmu_domain->non_strict = *(int *)data;
1635 break;
1636 default:
1637 ret = -ENODEV;
1638 }
Will Deacon518f7132014-11-14 17:17:54 +00001639 break;
Will Deaconc752ce42014-06-25 22:46:31 +01001640 default:
Robin Murphy44f68762018-09-20 17:10:27 +01001641 ret = -EINVAL;
Will Deaconc752ce42014-06-25 22:46:31 +01001642 }
Will Deacon518f7132014-11-14 17:17:54 +00001643out_unlock:
1644 mutex_unlock(&smmu_domain->init_mutex);
1645 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01001646}
1647
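/*
 * Convert a firmware IOMMU specifier into this driver's fwspec ID format:
 * the first cell carries the stream ID, and the optional second cell (or
 * the SMMU node's "stream-match-mask" property) carries the SMR mask in
 * the upper bits. A DT consumer might look something like this (values
 * purely illustrative):
 *
 *	iommus = <&smmu 0x400>;
 *	iommus = <&smmu 0x400 0x7f80>;
 */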
Robin Murphy021bb842016-09-14 15:26:46 +01001648static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
1649{
Robin Murphy56fbf602017-03-31 12:03:33 +01001650 u32 mask, fwid = 0;
Robin Murphy021bb842016-09-14 15:26:46 +01001651
1652 if (args->args_count > 0)
1653 fwid |= (u16)args->args[0];
1654
1655 if (args->args_count > 1)
1656 fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
Robin Murphy56fbf602017-03-31 12:03:33 +01001657 else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
1658 fwid |= (u16)mask << SMR_MASK_SHIFT;
Robin Murphy021bb842016-09-14 15:26:46 +01001659
1660 return iommu_fwspec_add_ids(dev, &fwid, 1);
1661}
1662
Eric Augerf3ebee82017-01-19 20:57:55 +00001663static void arm_smmu_get_resv_regions(struct device *dev,
1664 struct list_head *head)
1665{
1666 struct iommu_resv_region *region;
1667 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
1668
1669 region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
Robin Murphy9d3a4de2017-03-16 17:00:16 +00001670 prot, IOMMU_RESV_SW_MSI);
Eric Augerf3ebee82017-01-19 20:57:55 +00001671 if (!region)
1672 return;
1673
1674 list_add_tail(&region->list, head);
Robin Murphy273df962017-03-16 17:00:19 +00001675
1676 iommu_dma_get_resv_regions(dev, head);
Eric Augerf3ebee82017-01-19 20:57:55 +00001677}
1678
1679static void arm_smmu_put_resv_regions(struct device *dev,
1680 struct list_head *head)
1681{
1682 struct iommu_resv_region *entry, *next;
1683
1684 list_for_each_entry_safe(entry, next, head, list)
1685 kfree(entry);
1686}
1687
Will Deacon518f7132014-11-14 17:17:54 +00001688static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01001689 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01001690 .domain_alloc = arm_smmu_domain_alloc,
1691 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01001692 .attach_dev = arm_smmu_attach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01001693 .map = arm_smmu_map,
1694 .unmap = arm_smmu_unmap,
Robin Murphy44f68762018-09-20 17:10:27 +01001695 .flush_iotlb_all = arm_smmu_flush_iotlb_all,
Robin Murphy32b12442017-09-28 15:55:01 +01001696 .iotlb_sync = arm_smmu_iotlb_sync,
Will Deaconc752ce42014-06-25 22:46:31 +01001697 .iova_to_phys = arm_smmu_iova_to_phys,
1698 .add_device = arm_smmu_add_device,
1699 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02001700 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01001701 .domain_get_attr = arm_smmu_domain_get_attr,
1702 .domain_set_attr = arm_smmu_domain_set_attr,
Robin Murphy021bb842016-09-14 15:26:46 +01001703 .of_xlate = arm_smmu_of_xlate,
Eric Augerf3ebee82017-01-19 20:57:55 +00001704 .get_resv_regions = arm_smmu_get_resv_regions,
1705 .put_resv_regions = arm_smmu_put_resv_regions,
Will Deacon518f7132014-11-14 17:17:54 +00001706 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001707};
1708
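/*
 * Bring the SMMU to a known state: clear the global fault status, rewrite
 * every stream mapping entry and context bank, apply the MMU-500 errata
 * workarounds where relevant, invalidate the TLBs and finally program sCR0.
 */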
1709static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
1710{
1711 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001712 int i;
Peng Fan3ca37122016-05-03 21:50:30 +08001713 u32 reg, major;
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001714
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001715 /* clear global FSR */
1716 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
1717 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001718
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001719 /*
1720 * Reset stream mapping groups: Initial values mark all SMRn as
1721 * invalid and all S2CRn as bypass unless overridden.
1722 */
Robin Murphy8e8b2032016-09-12 17:13:50 +01001723 for (i = 0; i < smmu->num_mapping_groups; ++i)
1724 arm_smmu_write_sme(smmu, i);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001725
Nipun Gupta6eb18d42016-11-04 15:25:23 +05301726 if (smmu->model == ARM_MMU500) {
1727 /*
	1728		 * Before clearing ARM_MMU500_ACTLR_CPRE, we need to
	1729		 * clear the CACHE_LOCK bit of ACR first; note that the
	1730		 * CACHE_LOCK bit is only present in MMU-500 r2 onwards.
1731 */
1732 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
1733 major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
Peng Fan3ca37122016-05-03 21:50:30 +08001734 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
Nipun Gupta6eb18d42016-11-04 15:25:23 +05301735 if (major >= 2)
1736 reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
1737 /*
1738 * Allow unmatched Stream IDs to allocate bypass
1739 * TLB entries for reduced latency.
1740 */
Feng Kan74f55d32017-10-11 15:08:39 -07001741 reg |= ARM_MMU500_ACR_SMTNMB_TLBEN | ARM_MMU500_ACR_S2CRB_TLBEN;
Peng Fan3ca37122016-05-03 21:50:30 +08001742 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
1743 }
1744
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001745 /* Make sure all context banks are disabled and clear CB_FSR */
1746 for (i = 0; i < smmu->num_context_banks; ++i) {
Robin Murphy90df3732017-08-08 14:56:14 +01001747 void __iomem *cb_base = ARM_SMMU_CB(smmu, i);
1748
1749 arm_smmu_write_context_bank(smmu, i);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001750 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01001751 /*
1752 * Disable MMU-500's not-particularly-beneficial next-page
1753 * prefetcher for the sake of errata #841119 and #826419.
1754 */
1755 if (smmu->model == ARM_MMU500) {
1756 reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
1757 reg &= ~ARM_MMU500_ACTLR_CPRE;
1758 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
1759 }
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001760 }
Will Deacon1463fe42013-07-31 19:21:27 +01001761
Will Deacon45ae7cf2013-06-24 18:31:25 +01001762 /* Invalidate the TLB, just in case */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001763 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
1764 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
1765
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001766 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001767
Will Deacon45ae7cf2013-06-24 18:31:25 +01001768 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001769 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001770
1771 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001772 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001773
Robin Murphy25a1c962016-02-10 14:25:33 +00001774 /* Enable client access, handling unmatched streams as appropriate */
1775 reg &= ~sCR0_CLIENTPD;
1776 if (disable_bypass)
1777 reg |= sCR0_USFCFG;
1778 else
1779 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001780
1781 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001782 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001783
1784 /* Don't upgrade barriers */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001785 reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001786
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001787 if (smmu->features & ARM_SMMU_FEAT_VMID16)
1788 reg |= sCR0_VMID16EN;
1789
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001790 if (smmu->features & ARM_SMMU_FEAT_EXIDS)
1791 reg |= sCR0_EXIDENABLE;
1792
Will Deacon45ae7cf2013-06-24 18:31:25 +01001793 /* Push the button */
Robin Murphy11febfc2017-03-30 17:56:31 +01001794 arm_smmu_tlb_sync_global(smmu);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001795 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001796}
1797
1798static int arm_smmu_id_size_to_bits(int size)
1799{
1800 switch (size) {
1801 case 0:
1802 return 32;
1803 case 1:
1804 return 36;
1805 case 2:
1806 return 40;
1807 case 3:
1808 return 42;
1809 case 4:
1810 return 44;
1811 case 5:
1812 default:
1813 return 48;
1814 }
1815}
1816
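/*
 * Probe the ID registers to discover what this SMMU implementation
 * supports: translation stages, stream matching resources, context bank
 * counts, address sizes and page table formats.
 */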
1817static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1818{
1819 unsigned long size;
1820 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1821 u32 id;
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001822 bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001823 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001824
1825 dev_notice(smmu->dev, "probing hardware configuration...\n");
Robin Murphyb7862e32016-04-13 18:13:03 +01001826 dev_notice(smmu->dev, "SMMUv%d with:\n",
1827 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001828
1829 /* ID0 */
1830 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01001831
1832 /* Restrict available stages based on module parameter */
1833 if (force_stage == 1)
1834 id &= ~(ID0_S2TS | ID0_NTS);
1835 else if (force_stage == 2)
1836 id &= ~(ID0_S1TS | ID0_NTS);
1837
Will Deacon45ae7cf2013-06-24 18:31:25 +01001838 if (id & ID0_S1TS) {
1839 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
1840 dev_notice(smmu->dev, "\tstage 1 translation\n");
1841 }
1842
1843 if (id & ID0_S2TS) {
1844 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
1845 dev_notice(smmu->dev, "\tstage 2 translation\n");
1846 }
1847
1848 if (id & ID0_NTS) {
1849 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
1850 dev_notice(smmu->dev, "\tnested translation\n");
1851 }
1852
1853 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01001854 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001855 dev_err(smmu->dev, "\tno translation support!\n");
1856 return -ENODEV;
1857 }
1858
Robin Murphyb7862e32016-04-13 18:13:03 +01001859 if ((id & ID0_S1TS) &&
1860 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001861 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
1862 dev_notice(smmu->dev, "\taddress translation ops\n");
1863 }
1864
Robin Murphybae2c2d2015-07-29 19:46:05 +01001865 /*
1866 * In order for DMA API calls to work properly, we must defer to what
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001867 * the FW says about coherency, regardless of what the hardware claims.
Robin Murphybae2c2d2015-07-29 19:46:05 +01001868 * Fortunately, this also opens up a workaround for systems where the
1869 * ID register value has ended up configured incorrectly.
1870 */
Robin Murphybae2c2d2015-07-29 19:46:05 +01001871 cttw_reg = !!(id & ID0_CTTW);
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001872 if (cttw_fw || cttw_reg)
Robin Murphybae2c2d2015-07-29 19:46:05 +01001873 dev_notice(smmu->dev, "\t%scoherent table walk\n",
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001874 cttw_fw ? "" : "non-");
1875 if (cttw_fw != cttw_reg)
Robin Murphybae2c2d2015-07-29 19:46:05 +01001876 dev_notice(smmu->dev,
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001877 "\t(IDR0.CTTW overridden by FW configuration)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01001878
Robin Murphy21174242016-09-12 17:13:48 +01001879 /* Max. number of entries we have for stream matching/indexing */
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001880 if (smmu->version == ARM_SMMU_V2 && id & ID0_EXIDS) {
1881 smmu->features |= ARM_SMMU_FEAT_EXIDS;
1882 size = 1 << 16;
1883 } else {
1884 size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
1885 }
Robin Murphy21174242016-09-12 17:13:48 +01001886 smmu->streamid_mask = size - 1;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001887 if (id & ID0_SMS) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001888 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
Robin Murphy21174242016-09-12 17:13:48 +01001889 size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
1890 if (size == 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001891 dev_err(smmu->dev,
1892 "stream-matching supported, but no SMRs present!\n");
1893 return -ENODEV;
1894 }
1895
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001896 /* Zero-initialised to mark as invalid */
1897 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
1898 GFP_KERNEL);
1899 if (!smmu->smrs)
1900 return -ENOMEM;
1901
Will Deacon45ae7cf2013-06-24 18:31:25 +01001902 dev_notice(smmu->dev,
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001903			   "\tstream matching with %lu register groups\n", size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001904 }
Robin Murphy8e8b2032016-09-12 17:13:50 +01001905 /* s2cr->type == 0 means translation, so initialise explicitly */
1906 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
1907 GFP_KERNEL);
1908 if (!smmu->s2crs)
1909 return -ENOMEM;
1910 for (i = 0; i < size; i++)
1911 smmu->s2crs[i] = s2cr_init_val;
1912
Robin Murphy21174242016-09-12 17:13:48 +01001913 smmu->num_mapping_groups = size;
Robin Murphy588888a2016-09-12 17:13:54 +01001914 mutex_init(&smmu->stream_map_mutex);
Will Deacon8e517e72017-07-06 15:55:48 +01001915 spin_lock_init(&smmu->global_sync_lock);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001916
Robin Murphy7602b872016-04-28 17:12:09 +01001917 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
1918 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
1919 if (!(id & ID0_PTFS_NO_AARCH32S))
1920 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
1921 }
1922
Will Deacon45ae7cf2013-06-24 18:31:25 +01001923 /* ID1 */
1924 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01001925 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001926
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01001927 /* Check for size mismatch of SMMU address space from mapped region */
Will Deacon518f7132014-11-14 17:17:54 +00001928 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
Robin Murphy452107c2017-03-30 17:56:30 +01001929 size <<= smmu->pgshift;
1930 if (smmu->cb_base != gr0_base + size)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001931 dev_warn(smmu->dev,
Robin Murphy452107c2017-03-30 17:56:30 +01001932 "SMMU address space size (0x%lx) differs from mapped region size (0x%tx)!\n",
1933 size * 2, (smmu->cb_base - gr0_base) * 2);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001934
Will Deacon518f7132014-11-14 17:17:54 +00001935 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001936 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
1937 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
1938 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
1939 return -ENODEV;
1940 }
1941 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
1942 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphye086d912016-04-13 18:12:58 +01001943 /*
1944 * Cavium CN88xx erratum #27704.
1945 * Ensure ASID and VMID allocation is unique across all SMMUs in
1946 * the system.
1947 */
1948 if (smmu->model == CAVIUM_SMMUV2) {
1949 smmu->cavium_id_base =
1950 atomic_add_return(smmu->num_context_banks,
1951 &cavium_smmu_context_count);
1952 smmu->cavium_id_base -= smmu->num_context_banks;
Robert Richter53c35dce2017-03-13 11:39:01 +01001953 dev_notice(smmu->dev, "\tenabling workaround for Cavium erratum 27704\n");
Robin Murphye086d912016-04-13 18:12:58 +01001954 }
Robin Murphy90df3732017-08-08 14:56:14 +01001955 smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks,
1956 sizeof(*smmu->cbs), GFP_KERNEL);
1957 if (!smmu->cbs)
1958 return -ENOMEM;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001959
1960 /* ID2 */
1961 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
1962 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00001963 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001964
Will Deacon518f7132014-11-14 17:17:54 +00001965 /* The output mask is also applied for bypass */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001966 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00001967 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001968
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001969 if (id & ID2_VMID16)
1970 smmu->features |= ARM_SMMU_FEAT_VMID16;
1971
Robin Murphyf1d84542015-03-04 16:41:05 +00001972 /*
1973 * What the page table walker can address actually depends on which
1974 * descriptor format is in use, but since a) we don't know that yet,
1975 * and b) it can vary per context bank, this will have to do...
1976 */
1977 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
1978 dev_warn(smmu->dev,
1979 "failed to set DMA mask for table walker\n");
1980
Robin Murphyb7862e32016-04-13 18:13:03 +01001981 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00001982 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01001983 if (smmu->version == ARM_SMMU_V1_64K)
1984 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001985 } else {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001986 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
Will Deacon518f7132014-11-14 17:17:54 +00001987 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00001988 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01001989 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00001990 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01001991 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00001992 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01001993 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001994 }
1995
Robin Murphy7602b872016-04-28 17:12:09 +01001996 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01001997 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01001998 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01001999 if (smmu->features &
2000 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01002001 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01002002 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01002003 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01002004 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01002005 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01002006
Robin Murphyd5466352016-05-09 17:20:09 +01002007 if (arm_smmu_ops.pgsize_bitmap == -1UL)
2008 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
2009 else
2010 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
2011 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
2012 smmu->pgsize_bitmap);
2013
Will Deacon518f7132014-11-14 17:17:54 +00002014
Will Deacon28d60072014-09-01 16:24:48 +01002015 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
2016 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
Will Deacon518f7132014-11-14 17:17:54 +00002017 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01002018
2019 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
2020 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
Will Deacon518f7132014-11-14 17:17:54 +00002021 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01002022
Will Deacon45ae7cf2013-06-24 18:31:25 +01002023 return 0;
2024}
2025
Robin Murphy67b65a32016-04-13 18:12:57 +01002026struct arm_smmu_match_data {
2027 enum arm_smmu_arch_version version;
2028 enum arm_smmu_implementation model;
2029};
2030
2031#define ARM_SMMU_MATCH_DATA(name, ver, imp) \
Sricharan R96a299d2018-12-04 11:52:09 +05302032static const struct arm_smmu_match_data name = { .version = ver, .model = imp }
Robin Murphy67b65a32016-04-13 18:12:57 +01002033
2034ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
2035ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
Robin Murphyb7862e32016-04-13 18:13:03 +01002036ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01002037ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
Robin Murphye086d912016-04-13 18:12:58 +01002038ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
Vivek Gautam89cddc52018-12-04 11:52:13 +05302039ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2);
Robin Murphy67b65a32016-04-13 18:12:57 +01002040
Joerg Roedel09b52692014-10-02 12:24:45 +02002041static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01002042 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
2043 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
2044 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01002045 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01002046 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01002047 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Vivek Gautam89cddc52018-12-04 11:52:13 +05302048 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
Robin Murphy09360402014-08-28 17:51:59 +01002049 { },
2050};
Robin Murphy09360402014-08-28 17:51:59 +01002051
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002052#ifdef CONFIG_ACPI
2053static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
2054{
2055 int ret = 0;
2056
2057 switch (model) {
2058 case ACPI_IORT_SMMU_V1:
2059 case ACPI_IORT_SMMU_CORELINK_MMU400:
2060 smmu->version = ARM_SMMU_V1;
2061 smmu->model = GENERIC_SMMU;
2062 break;
Robin Murphy84c24372017-06-19 16:41:56 +01002063 case ACPI_IORT_SMMU_CORELINK_MMU401:
2064 smmu->version = ARM_SMMU_V1_64K;
2065 smmu->model = GENERIC_SMMU;
2066 break;
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002067 case ACPI_IORT_SMMU_V2:
2068 smmu->version = ARM_SMMU_V2;
2069 smmu->model = GENERIC_SMMU;
2070 break;
2071 case ACPI_IORT_SMMU_CORELINK_MMU500:
2072 smmu->version = ARM_SMMU_V2;
2073 smmu->model = ARM_MMU500;
2074 break;
Robin Murphy84c24372017-06-19 16:41:56 +01002075 case ACPI_IORT_SMMU_CAVIUM_THUNDERX:
2076 smmu->version = ARM_SMMU_V2;
2077 smmu->model = CAVIUM_SMMUV2;
2078 break;
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002079 default:
2080 ret = -ENODEV;
2081 }
2082
2083 return ret;
2084}
2085
2086static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
2087 struct arm_smmu_device *smmu)
2088{
2089 struct device *dev = smmu->dev;
2090 struct acpi_iort_node *node =
2091 *(struct acpi_iort_node **)dev_get_platdata(dev);
2092 struct acpi_iort_smmu *iort_smmu;
2093 int ret;
2094
2095 /* Retrieve SMMU1/2 specific data */
2096 iort_smmu = (struct acpi_iort_smmu *)node->node_data;
2097
2098 ret = acpi_smmu_get_data(iort_smmu->model, smmu);
2099 if (ret < 0)
2100 return ret;
2101
2102 /* Ignore the configuration access interrupt */
2103 smmu->num_global_irqs = 1;
2104
2105 if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
2106 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
2107
2108 return 0;
2109}
2110#else
2111static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
2112 struct arm_smmu_device *smmu)
2113{
2114 return -ENODEV;
2115}
2116#endif
2117
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002118static int arm_smmu_device_dt_probe(struct platform_device *pdev,
2119 struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002120{
Robin Murphy67b65a32016-04-13 18:12:57 +01002121 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002122 struct device *dev = &pdev->dev;
Robin Murphy021bb842016-09-14 15:26:46 +01002123 bool legacy_binding;
2124
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002125 if (of_property_read_u32(dev->of_node, "#global-interrupts",
2126 &smmu->num_global_irqs)) {
2127 dev_err(dev, "missing #global-interrupts property\n");
2128 return -ENODEV;
2129 }
2130
2131 data = of_device_get_match_data(dev);
2132 smmu->version = data->version;
2133 smmu->model = data->model;
2134
2135 parse_driver_options(smmu);
2136
Robin Murphy021bb842016-09-14 15:26:46 +01002137 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
2138 if (legacy_binding && !using_generic_binding) {
2139 if (!using_legacy_binding)
2140 pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
2141 using_legacy_binding = true;
2142 } else if (!legacy_binding && !using_legacy_binding) {
2143 using_generic_binding = true;
2144 } else {
2145 dev_err(dev, "not probing due to mismatched DT properties\n");
2146 return -ENODEV;
2147 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002148
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002149 if (of_dma_is_coherent(dev->of_node))
2150 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
2151
2152 return 0;
2153}
2154
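/*
 * Hook arm_smmu_ops up to every bus type the driver cares about, so that
 * devices probed later on those buses get their IOMMU groups and default
 * domains set up.
 */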
Robin Murphyf6810c12017-04-10 16:51:05 +05302155static void arm_smmu_bus_init(void)
2156{
2157 /* Oh, for a proper bus abstraction */
2158 if (!iommu_present(&platform_bus_type))
2159 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
2160#ifdef CONFIG_ARM_AMBA
2161 if (!iommu_present(&amba_bustype))
2162 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
2163#endif
2164#ifdef CONFIG_PCI
2165 if (!iommu_present(&pci_bus_type)) {
2166 pci_request_acs();
2167 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
2168 }
2169#endif
Nipun Guptaeab03e22018-09-10 19:19:18 +05302170#ifdef CONFIG_FSL_MC_BUS
2171 if (!iommu_present(&fsl_mc_bus_type))
2172 bus_set_iommu(&fsl_mc_bus_type, &arm_smmu_ops);
2173#endif
Robin Murphyf6810c12017-04-10 16:51:05 +05302174}
2175
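/*
 * Main probe routine: gather the DT/ACPI description, map the registers,
 * claim clocks and interrupts, probe the hardware configuration, register
 * with the IOMMU core and reset the SMMU ready for use.
 */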
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002176static int arm_smmu_device_probe(struct platform_device *pdev)
2177{
2178 struct resource *res;
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002179 resource_size_t ioaddr;
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002180 struct arm_smmu_device *smmu;
2181 struct device *dev = &pdev->dev;
2182 int num_irqs, i, err;
2183
Will Deacon45ae7cf2013-06-24 18:31:25 +01002184 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
2185 if (!smmu) {
2186 dev_err(dev, "failed to allocate arm_smmu_device\n");
2187 return -ENOMEM;
2188 }
2189 smmu->dev = dev;
2190
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002191 if (dev->of_node)
2192 err = arm_smmu_device_dt_probe(pdev, smmu);
2193 else
2194 err = arm_smmu_device_acpi_probe(pdev, smmu);
2195
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002196 if (err)
2197 return err;
Robin Murphy09360402014-08-28 17:51:59 +01002198
Will Deacon45ae7cf2013-06-24 18:31:25 +01002199 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002200 ioaddr = res->start;
Julia Lawall8a7f4312013-08-19 12:20:37 +01002201 smmu->base = devm_ioremap_resource(dev, res);
2202 if (IS_ERR(smmu->base))
2203 return PTR_ERR(smmu->base);
Robin Murphy452107c2017-03-30 17:56:30 +01002204 smmu->cb_base = smmu->base + resource_size(res) / 2;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002205
Will Deacon45ae7cf2013-06-24 18:31:25 +01002206 num_irqs = 0;
2207 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
2208 num_irqs++;
2209 if (num_irqs > smmu->num_global_irqs)
2210 smmu->num_context_irqs++;
2211 }
2212
Andreas Herrmann44a08de2013-10-01 13:39:07 +01002213 if (!smmu->num_context_irqs) {
2214 dev_err(dev, "found %d interrupts but expected at least %d\n",
2215 num_irqs, smmu->num_global_irqs + 1);
2216 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002217 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002218
Kees Cooka86854d2018-06-12 14:07:58 -07002219 smmu->irqs = devm_kcalloc(dev, num_irqs, sizeof(*smmu->irqs),
Will Deacon45ae7cf2013-06-24 18:31:25 +01002220 GFP_KERNEL);
2221 if (!smmu->irqs) {
2222 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
2223 return -ENOMEM;
2224 }
2225
2226 for (i = 0; i < num_irqs; ++i) {
2227 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07002228
Will Deacon45ae7cf2013-06-24 18:31:25 +01002229 if (irq < 0) {
2230 dev_err(dev, "failed to get irq index %d\n", i);
2231 return -ENODEV;
2232 }
2233 smmu->irqs[i] = irq;
2234 }
2235
Sricharan R96a299d2018-12-04 11:52:09 +05302236 err = devm_clk_bulk_get_all(dev, &smmu->clks);
2237 if (err < 0) {
2238 dev_err(dev, "failed to get clocks %d\n", err);
2239 return err;
2240 }
2241 smmu->num_clks = err;
2242
2243 err = clk_bulk_prepare_enable(smmu->num_clks, smmu->clks);
2244 if (err)
2245 return err;
2246
Olav Haugan3c8766d2014-08-22 17:12:32 -07002247 err = arm_smmu_device_cfg_probe(smmu);
2248 if (err)
2249 return err;
2250
Vivek Gautamd1e20222018-07-19 23:23:56 +05302251 if (smmu->version == ARM_SMMU_V2) {
2252 if (smmu->num_context_banks > smmu->num_context_irqs) {
2253 dev_err(dev,
2254 "found only %d context irq(s) but %d required\n",
2255 smmu->num_context_irqs, smmu->num_context_banks);
2256 return -ENODEV;
2257 }
2258
2259 /* Ignore superfluous interrupts */
2260 smmu->num_context_irqs = smmu->num_context_banks;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002261 }
2262
Will Deacon45ae7cf2013-06-24 18:31:25 +01002263 for (i = 0; i < smmu->num_global_irqs; ++i) {
Peng Fanbee14002016-07-04 17:38:22 +08002264 err = devm_request_irq(smmu->dev, smmu->irqs[i],
2265 arm_smmu_global_fault,
2266 IRQF_SHARED,
2267 "arm-smmu global fault",
2268 smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002269 if (err) {
2270 dev_err(dev, "failed to request global IRQ %d (%u)\n",
2271 i, smmu->irqs[i]);
Robin Murphyf80cd882016-09-14 15:21:39 +01002272 return err;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002273 }
2274 }
2275
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002276 err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
2277 "smmu.%pa", &ioaddr);
2278 if (err) {
2279 dev_err(dev, "Failed to register iommu in sysfs\n");
2280 return err;
2281 }
2282
2283 iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
2284 iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
2285
2286 err = iommu_device_register(&smmu->iommu);
2287 if (err) {
2288 dev_err(dev, "Failed to register iommu\n");
2289 return err;
2290 }
2291
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002292 platform_set_drvdata(pdev, smmu);
Will Deaconfd90cec2013-08-21 13:56:34 +01002293 arm_smmu_device_reset(smmu);
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03002294 arm_smmu_test_smr_masks(smmu);
Robin Murphy021bb842016-09-14 15:26:46 +01002295
Robin Murphyf6810c12017-04-10 16:51:05 +05302296 /*
Sricharan Rd4a44f02018-12-04 11:52:10 +05302297 * We want to avoid touching dev->power.lock in fastpaths unless
2298 * it's really going to do something useful - pm_runtime_enabled()
2299 * can serve as an ideal proxy for that decision. So, conditionally
2300 * enable pm_runtime.
2301 */
2302 if (dev->pm_domain) {
2303 pm_runtime_set_active(dev);
2304 pm_runtime_enable(dev);
2305 }
2306
2307 /*
Robin Murphyf6810c12017-04-10 16:51:05 +05302308 * For ACPI and generic DT bindings, an SMMU will be probed before
2309 * any device which might need it, so we want the bus ops in place
2310 * ready to handle default domain setup as soon as any SMMU exists.
2311 */
2312 if (!using_legacy_binding)
2313 arm_smmu_bus_init();
2314
Will Deacon45ae7cf2013-06-24 18:31:25 +01002315 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002316}
2317
Robin Murphyf6810c12017-04-10 16:51:05 +05302318/*
2319 * With the legacy DT binding in play, though, we have no guarantees about
2320 * probe order, but then we're also not doing default domains, so we can
2321 * delay setting bus ops until we're sure every possible SMMU is ready,
2322 * and that way ensure that no add_device() calls get missed.
2323 */
2324static int arm_smmu_legacy_bus_init(void)
2325{
2326 if (using_legacy_binding)
2327 arm_smmu_bus_init();
2328 return 0;
2329}
2330device_initcall_sync(arm_smmu_legacy_bus_init);
2331
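/*
 * On shutdown, warn if any domains are still live, then disable the SMMU
 * (sCR0.CLIENTPD) and release its clocks so nothing is left translating
 * behind the kernel's back.
 */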
Paul Gortmakeraddb672f2018-12-01 14:19:16 -05002332static void arm_smmu_device_shutdown(struct platform_device *pdev)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002333{
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002334 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002335
2336 if (!smmu)
Paul Gortmakeraddb672f2018-12-01 14:19:16 -05002337 return;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002338
Will Deaconecfadb62013-07-31 19:21:28 +01002339 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002340 dev_err(&pdev->dev, "removing device with active domains!\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01002341
Sricharan Rd4a44f02018-12-04 11:52:10 +05302342 arm_smmu_rpm_get(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002343 /* Turn the thing off */
Mitchel Humpherys29073202014-07-08 09:52:18 -07002344 writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Sricharan Rd4a44f02018-12-04 11:52:10 +05302345 arm_smmu_rpm_put(smmu);
Sricharan R96a299d2018-12-04 11:52:09 +05302346
Sricharan Rd4a44f02018-12-04 11:52:10 +05302347 if (pm_runtime_enabled(smmu->dev))
2348 pm_runtime_force_suspend(smmu->dev);
2349 else
2350 clk_bulk_disable(smmu->num_clks, smmu->clks);
2351
2352 clk_bulk_unprepare(smmu->num_clks, smmu->clks);
Nate Watterson7aa86192017-06-29 18:18:15 -04002353}
2354
Sricharan R96a299d2018-12-04 11:52:09 +05302355static int __maybe_unused arm_smmu_runtime_resume(struct device *dev)
Robin Murphya2d866f2017-08-08 14:56:15 +01002356{
2357 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
Sricharan R96a299d2018-12-04 11:52:09 +05302358 int ret;
2359
2360 ret = clk_bulk_enable(smmu->num_clks, smmu->clks);
2361 if (ret)
2362 return ret;
Robin Murphya2d866f2017-08-08 14:56:15 +01002363
2364 arm_smmu_device_reset(smmu);
Sricharan R96a299d2018-12-04 11:52:09 +05302365
Will Deacon45ae7cf2013-06-24 18:31:25 +01002366 return 0;
2367}
2368
Sricharan R96a299d2018-12-04 11:52:09 +05302369static int __maybe_unused arm_smmu_runtime_suspend(struct device *dev)
Dan Carpenter6614ee72013-08-21 09:34:20 +01002370{
Sricharan R96a299d2018-12-04 11:52:09 +05302371 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
2372
2373 clk_bulk_disable(smmu->num_clks, smmu->clks);
2374
2375 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002376}
2377
Robin Murphya2d866f2017-08-08 14:56:15 +01002378static int __maybe_unused arm_smmu_pm_resume(struct device *dev)
2379{
Sricharan R96a299d2018-12-04 11:52:09 +05302380 if (pm_runtime_suspended(dev))
2381 return 0;
Robin Murphya2d866f2017-08-08 14:56:15 +01002382
Sricharan R96a299d2018-12-04 11:52:09 +05302383 return arm_smmu_runtime_resume(dev);
Robin Murphya2d866f2017-08-08 14:56:15 +01002384}
2385
Sricharan R96a299d2018-12-04 11:52:09 +05302386static int __maybe_unused arm_smmu_pm_suspend(struct device *dev)
2387{
2388 if (pm_runtime_suspended(dev))
2389 return 0;
2390
2391 return arm_smmu_runtime_suspend(dev);
2392}
2393
2394static const struct dev_pm_ops arm_smmu_pm_ops = {
2395 SET_SYSTEM_SLEEP_PM_OPS(arm_smmu_pm_suspend, arm_smmu_pm_resume)
2396 SET_RUNTIME_PM_OPS(arm_smmu_runtime_suspend,
2397 arm_smmu_runtime_resume, NULL)
2398};
Robin Murphya2d866f2017-08-08 14:56:15 +01002399
Will Deacon45ae7cf2013-06-24 18:31:25 +01002400static struct platform_driver arm_smmu_driver = {
2401 .driver = {
Paul Gortmakeraddb672f2018-12-01 14:19:16 -05002402 .name = "arm-smmu",
2403 .of_match_table = of_match_ptr(arm_smmu_of_match),
2404 .pm = &arm_smmu_pm_ops,
2405 .suppress_bind_attrs = true,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002406 },
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002407 .probe = arm_smmu_device_probe,
Nate Watterson7aa86192017-06-29 18:18:15 -04002408 .shutdown = arm_smmu_device_shutdown,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002409};
Paul Gortmakeraddb672f2018-12-01 14:19:16 -05002410builtin_platform_driver(arm_smmu_driver);