// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 *	- Extended Stream ID (16 bit)
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>
#include <linux/fsl/mc.h>

#include "arm-smmu-regs.h"

/*
 * Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU
 * global register space are still, in fact, using a hypervisor to mediate it
 * by trapping and emulating register accesses. Sadly, some deployed versions
 * of said trapping code have bugs wherein they go horribly wrong for stores
 * using r31 (i.e. XZR/WZR) as the source register.
 */
#define QCOM_DUMMY_VAL -1

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)
#define ARM_MMU500_ACR_S2CRB_TLBEN	(1 << 10)
#define ARM_MMU500_ACR_SMTNMB_TLBEN	(1 << 8)

#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */
#define TLB_SPIN_COUNT			10

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif

/* Translation context bank */
#define ARM_SMMU_CB(smmu, n)	((smmu)->cb_base + ((n) << (smmu)->pgshift))

#define MSI_IOVA_BASE			0x8000000
#define MSI_IOVA_LENGTH			0x100000

static int force_stage;
/*
 * not really modular, but the easiest way to keep compat with existing
 * bootargs behaviour is to continue using module_param() here.
 */
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass =
	IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT);
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
	QCOM_SMMUV2,
};

struct arm_smmu_s2cr {
	struct iommu_group		*group;
	int				count;
	enum arm_smmu_s2cr_type		type;
	enum arm_smmu_s2cr_privcfg	privcfg;
	u8				cbndx;
};

#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
}

struct arm_smmu_smr {
	u16				mask;
	u16				id;
	bool				valid;
};

struct arm_smmu_cb {
	u64				ttbr[2];
	u32				tcr[2];
	u32				mair[2];
	struct arm_smmu_cfg		*cfg;
};

struct arm_smmu_master_cfg {
	struct arm_smmu_device		*smmu;
	s16				smendx[];
};
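/*
 * Per-master data is stashed in dev->iommu_fwspec: iommu_priv points at the
 * arm_smmu_master_cfg above, and smendx[] records which stream-map entry was
 * allocated for each of the master's stream IDs.
 */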
#define INVALID_SMENDX			-1
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
#define fwspec_smendx(fw, i) \
	(i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
#define for_each_cfg_sme(fw, i, idx) \
	for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	void __iomem			*cb_base;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
#define ARM_SMMU_FEAT_EXIDS		(1 << 12)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	struct arm_smmu_cb		*cbs;
	atomic_t			irptndx;

	u32				num_mapping_groups;
	u16				streamid_mask;
	u16				smr_mask_mask;
	struct arm_smmu_smr		*smrs;
	struct arm_smmu_s2cr		*s2crs;
	struct mutex			stream_map_mutex;

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;
	struct clk_bulk_data		*clks;
	int				num_clks;

	u32				cavium_id_base; /* Specific to Cavium */

	spinlock_t			global_sync_lock;

	/* IOMMU core code handle */
	struct iommu_device		iommu;
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	union {
		u16			asid;
		u16			vmid;
	};
	u32				cbar;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
	ARM_SMMU_DOMAIN_BYPASS,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	const struct iommu_flush_ops	*tlb_ops;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	bool				non_strict;
	struct mutex			init_mutex; /* Protects smmu pointer */
	spinlock_t			cb_lock; /* Serialises ATS1* ops and TLB syncs */
	struct iommu_domain		domain;
};

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static bool using_legacy_binding, using_generic_binding;

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};

static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
{
	if (pm_runtime_enabled(smmu->dev))
		return pm_runtime_get_sync(smmu->dev);

	return 0;
}

static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
{
	if (pm_runtime_enabled(smmu->dev))
		pm_runtime_put(smmu->dev);
}

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}

static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", 0)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}

static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;

static int arm_smmu_register_legacy_master(struct device *dev,
					    struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err;

	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}

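/* Allocate the first free index in a shared bitmap (e.g. the context bank map). */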
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
				void __iomem *sync, void __iomem *status)
{
	unsigned int spin_cnt, delay;

	writel_relaxed(QCOM_DUMMY_VAL, sync);
	for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
		for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
			if (!(readl_relaxed(status) & sTLBGSTATUS_GSACTIVE))
				return;
			cpu_relax();
		}
		udelay(delay);
	}
	dev_err_ratelimited(smmu->dev,
			    "TLB sync timed out -- SMMU may be deadlocked\n");
}

static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
{
	void __iomem *base = ARM_SMMU_GR0(smmu);
	unsigned long flags;

	spin_lock_irqsave(&smmu->global_sync_lock, flags);
	__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_GR0_sTLBGSYNC,
			    base + ARM_SMMU_GR0_sTLBGSTATUS);
	spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
}

static void arm_smmu_tlb_sync_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
	unsigned long flags;

	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
	__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_CB_TLBSYNC,
			    base + ARM_SMMU_CB_TLBSTATUS);
	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
}

static void arm_smmu_tlb_sync_vmid(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;

	arm_smmu_tlb_sync_global(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context_s1(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *base = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);

	/*
	 * NOTE: this is not a relaxed write; it needs to guarantee that PTEs
	 * cleared by the current CPU are visible to the SMMU before the TLBI.
	 */
	writel(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
	arm_smmu_tlb_sync_context(cookie);
}

static void arm_smmu_tlb_inv_context_s2(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *base = ARM_SMMU_GR0(smmu);

	/* NOTE: see above */
	writel(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
	arm_smmu_tlb_sync_global(smmu);
}

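/*
 * Invalidate the TLB by IOVA, one granule at a time, without issuing a sync;
 * the io-pgtable layer invokes ->tlb_sync separately to wait for completion.
 */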
static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);

	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	if (stage1) {
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			iova &= ~12UL;
			iova |= cfg->asid;
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)cfg->asid << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else {
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	}
}

/*
 * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
 * almost negligible, but the benefit of getting the first one in as far ahead
 * of the sync as possible is significant, hence we don't just make this a
 * no-op and set .tlb_sync to arm_smmu_inv_context_s2() as you might think.
 */
static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
					 size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	void __iomem *base = ARM_SMMU_GR0(smmu_domain->smmu);

	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
}

static const struct iommu_flush_ops arm_smmu_s1_tlb_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s1,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync_context,
};

static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync_context,
};

static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
	.tlb_add_flush	= arm_smmu_tlb_inv_vmid_nosync,
	.tlb_sync	= arm_smmu_tlb_sync_vmid,
};

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	u32 fsr, fsynr, cbfrsynra;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr1_base = ARM_SMMU_GR1(smmu);
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
	cbfrsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));

	dev_err_ratelimited(smmu->dev,
	"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
			    fsr, iova, fsynr, cbfrsynra, cfg->cbndx);

	writel(fsr, cb_base + ARM_SMMU_CB_FSR);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	cb->cfg = cfg;

	/* TTBCR */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr;
		} else {
			cb->tcr[0] = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
			cb->tcr[1] = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			cb->tcr[1] |= TTBCR2_SEP_UPSTREAM;
			if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
				cb->tcr[1] |= TTBCR2_AS;
		}
	} else {
		cb->tcr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	}

	/* TTBRs */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
			cb->ttbr[1] = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
		} else {
			cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
			cb->ttbr[0] |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
			cb->ttbr[1] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
			cb->ttbr[1] |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
		}
	} else {
		cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr;
			cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr;
		} else {
			cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
			cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		}
	}
}

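/* Commit the software state cached in smmu->cbs[idx] to the context bank registers. */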
static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
{
	u32 reg;
	bool stage1;
	struct arm_smmu_cb *cb = &smmu->cbs[idx];
	struct arm_smmu_cfg *cfg = cb->cfg;
	void __iomem *cb_base, *gr1_base;

	cb_base = ARM_SMMU_CB(smmu, idx);

	/* Unassigned context banks only need disabling */
	if (!cfg) {
		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
		return;
	}

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	/* CBA2R */
	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_RW64_64BIT;
		else
			reg = CBA2R_RW64_32BIT;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= cfg->vmid << CBA2R_VMID_SHIFT;

		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(idx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version < ARM_SMMU_V2)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= cfg->vmid << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(idx));

	/*
	 * TTBCR
	 * We must write this before the TTBRs, since it determines the
	 * access behaviour of some fields (in particular, ASID[15:8]).
	 */
	if (stage1 && smmu->version > ARM_SMMU_V1)
		writel_relaxed(cb->tcr[1], cb_base + ARM_SMMU_CB_TTBCR2);
	writel_relaxed(cb->tcr[0], cb_base + ARM_SMMU_CB_TTBCR);

	/* TTBRs */
	if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
		writel_relaxed(cfg->asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
		writel_relaxed(cb->ttbr[0], cb_base + ARM_SMMU_CB_TTBR0);
		writel_relaxed(cb->ttbr[1], cb_base + ARM_SMMU_CB_TTBR1);
	} else {
		writeq_relaxed(cb->ttbr[0], cb_base + ARM_SMMU_CB_TTBR0);
		if (stage1)
			writeq_relaxed(cb->ttbr[1], cb_base + ARM_SMMU_CB_TTBR1);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		writel_relaxed(cb->mair[0], cb_base + ARM_SMMU_CB_S1_MAIR0);
		writel_relaxed(cb->mair[1], cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= SCTLR_E;

	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
		smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N              S1
	 *     S1             S1+S2            S1
	 *     S1               S2             S2
	 *     S1               S1             S1
	 *     N                N              N
	 *     N              S1+S2            S2
	 *     N                S2             S2
	 *     N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
	    !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
	    (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		} else {
			fmt = ARM_V7S;
			ias = min(ias, 32UL);
			oas = min(oas, 32UL);
		}
		smmu_domain->tlb_ops = &arm_smmu_s1_tlb_ops;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		if (smmu->version == ARM_SMMU_V2)
			smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v2;
		else
			smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v1;
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}
	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (ret < 0)
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
		cfg->vmid = cfg->cbndx + 1 + smmu->cavium_id_base;
	else
		cfg->asid = cfg->cbndx + smmu->cavium_id_base;

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.coherent_walk	= smmu->features & ARM_SMMU_FEAT_COHERENT_WALK,
		.tlb		= smmu_domain->tlb_ops,
		.iommu_dev	= smmu->dev,
	};

	if (smmu_domain->non_strict)
		pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1UL << ias) - 1;
	domain->geometry.force_aperture = true;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
			       IRQF_SHARED, "arm-smmu-context-fault", domain);
	if (ret < 0) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	int ret, irq;

	if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
		return;

	ret = arm_smmu_rpm_get(smmu);
	if (ret < 0)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	smmu->cbs[cfg->cbndx].cfg = NULL;
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);

	arm_smmu_rpm_put(smmu);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED &&
	    type != IOMMU_DOMAIN_DMA &&
	    type != IOMMU_DOMAIN_IDENTITY)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
	    iommu_get_dma_cookie(&smmu_domain->domain))) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->cb_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_smr *smr = smmu->smrs + idx;
	u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;

	if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
		reg |= SMR_VALID;
	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
}

static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
	u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
		  (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
		  (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;

	if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
	    smmu->smrs[idx].valid)
		reg |= S2CR_EXIDVALID;
	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
}

static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
{
	arm_smmu_write_s2cr(smmu, idx);
	if (smmu->smrs)
		arm_smmu_write_smr(smmu, idx);
}

/*
 * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
 * should be called after sCR0 is written.
 */
static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 smr;

	if (!smmu->smrs)
		return;

	/*
	 * SMR.ID bits may not be preserved if the corresponding MASK
	 * bits are set, so check each one separately. We can reject
	 * masters later if they try to claim IDs outside these masks.
	 */
	smr = smmu->streamid_mask << SMR_ID_SHIFT;
	writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
	smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
	smmu->streamid_mask = smr >> SMR_ID_SHIFT;

	smr = smmu->streamid_mask << SMR_MASK_SHIFT;
	writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
	smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
	smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
}

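/*
 * Find a stream-map entry for the given ID/mask: reuse an existing SMR that
 * entirely covers it, claim a free one, or fail if a partial overlap exists.
 */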
static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
{
	struct arm_smmu_smr *smrs = smmu->smrs;
	int i, free_idx = -ENOSPC;

	/* Stream indexing is blissfully easy */
	if (!smrs)
		return id;

	/* Validating SMRs is... less so */
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		if (!smrs[i].valid) {
			/*
			 * Note the first free entry we come across, which
			 * we'll claim in the end if nothing else matches.
			 */
			if (free_idx < 0)
				free_idx = i;
			continue;
		}
		/*
		 * If the new entry is _entirely_ matched by an existing entry,
		 * then reuse that, with the guarantee that there also cannot
		 * be any subsequent conflicting entries. In normal use we'd
		 * expect simply identical entries for this case, but there's
		 * no harm in accommodating the generalisation.
		 */
		if ((mask & smrs[i].mask) == mask &&
		    !((id ^ smrs[i].id) & ~smrs[i].mask))
			return i;
		/*
		 * If the new entry has any other overlap with an existing one,
		 * though, then there always exists at least one stream ID
		 * which would cause a conflict, and we can't allow that risk.
		 */
		if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
			return -EINVAL;
	}

	return free_idx;
}

static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
{
	if (--smmu->s2crs[idx].count)
		return false;

	smmu->s2crs[idx] = s2cr_init_val;
	if (smmu->smrs)
		smmu->smrs[idx].valid = false;

	return true;
}

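/*
 * Allocate and program stream-map entries for each of a master's stream IDs,
 * rolling back on failure so the hardware is only touched once every entry
 * has been claimed successfully.
 */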
1131static int arm_smmu_master_alloc_smes(struct device *dev)
1132{
Joerg Roedel9b468f72018-11-29 14:01:00 +01001133 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001134 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy588888a2016-09-12 17:13:54 +01001135 struct arm_smmu_device *smmu = cfg->smmu;
1136 struct arm_smmu_smr *smrs = smmu->smrs;
1137 struct iommu_group *group;
1138 int i, idx, ret;
1139
1140 mutex_lock(&smmu->stream_map_mutex);
1141 /* Figure out a viable stream map entry allocation */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001142 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy021bb842016-09-14 15:26:46 +01001143 u16 sid = fwspec->ids[i];
1144 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
1145
Robin Murphy588888a2016-09-12 17:13:54 +01001146 if (idx != INVALID_SMENDX) {
1147 ret = -EEXIST;
1148 goto out_err;
1149 }
1150
Robin Murphy021bb842016-09-14 15:26:46 +01001151 ret = arm_smmu_find_sme(smmu, sid, mask);
Robin Murphy588888a2016-09-12 17:13:54 +01001152 if (ret < 0)
1153 goto out_err;
1154
1155 idx = ret;
1156 if (smrs && smmu->s2crs[idx].count == 0) {
Robin Murphy021bb842016-09-14 15:26:46 +01001157 smrs[idx].id = sid;
1158 smrs[idx].mask = mask;
Robin Murphy588888a2016-09-12 17:13:54 +01001159 smrs[idx].valid = true;
1160 }
1161 smmu->s2crs[idx].count++;
1162 cfg->smendx[i] = (s16)idx;
1163 }
1164
1165 group = iommu_group_get_for_dev(dev);
1166 if (!group)
1167 group = ERR_PTR(-ENOMEM);
1168 if (IS_ERR(group)) {
1169 ret = PTR_ERR(group);
1170 goto out_err;
1171 }
1172 iommu_group_put(group);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001173
Will Deacon45ae7cf2013-06-24 18:31:25 +01001174 /* It worked! Now, poke the actual hardware */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001175 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy588888a2016-09-12 17:13:54 +01001176 arm_smmu_write_sme(smmu, idx);
1177 smmu->s2crs[idx].group = group;
1178 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001179
Robin Murphy588888a2016-09-12 17:13:54 +01001180 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001181 return 0;
1182
Robin Murphy588888a2016-09-12 17:13:54 +01001183out_err:
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001184 while (i--) {
Robin Murphy588888a2016-09-12 17:13:54 +01001185 arm_smmu_free_sme(smmu, cfg->smendx[i]);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001186 cfg->smendx[i] = INVALID_SMENDX;
1187 }
Robin Murphy588888a2016-09-12 17:13:54 +01001188 mutex_unlock(&smmu->stream_map_mutex);
1189 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001190}
1191
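/*
 * Release every stream map entry held by this master, writing back any
 * entries that become completely unused.
 */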
Robin Murphyadfec2e2016-09-12 17:13:55 +01001192static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001193{
Robin Murphyadfec2e2016-09-12 17:13:55 +01001194 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
1195 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphyd3097e32016-09-12 17:13:53 +01001196 int i, idx;
Will Deacon43b412b2014-07-15 11:22:24 +01001197
Robin Murphy588888a2016-09-12 17:13:54 +01001198 mutex_lock(&smmu->stream_map_mutex);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001199 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy588888a2016-09-12 17:13:54 +01001200 if (arm_smmu_free_sme(smmu, idx))
1201 arm_smmu_write_sme(smmu, idx);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001202 cfg->smendx[i] = INVALID_SMENDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001203 }
Robin Murphy588888a2016-09-12 17:13:54 +01001204 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001205}
1206
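/*
 * Point every stream map entry used by this master at the domain's context
 * bank (or at bypass for an identity domain), skipping entries which are
 * already configured appropriately.
 */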
Will Deacon45ae7cf2013-06-24 18:31:25 +01001207static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Robin Murphyadfec2e2016-09-12 17:13:55 +01001208 struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001209{
Will Deacon44680ee2014-06-25 11:29:12 +01001210 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001211 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001212 u8 cbndx = smmu_domain->cfg.cbndx;
Will Deacon61bc6712017-01-06 16:56:03 +00001213 enum arm_smmu_s2cr_type type;
Robin Murphy588888a2016-09-12 17:13:54 +01001214 int i, idx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001215
Will Deacon61bc6712017-01-06 16:56:03 +00001216 if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
1217 type = S2CR_TYPE_BYPASS;
1218 else
1219 type = S2CR_TYPE_TRANS;
1220
Robin Murphyadfec2e2016-09-12 17:13:55 +01001221 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy8e8b2032016-09-12 17:13:50 +01001222 if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
Robin Murphy588888a2016-09-12 17:13:54 +01001223 continue;
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001224
Robin Murphy8e8b2032016-09-12 17:13:50 +01001225 s2cr[idx].type = type;
Sricharan Re1989802017-01-06 18:58:15 +05301226 s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001227 s2cr[idx].cbndx = cbndx;
1228 arm_smmu_write_s2cr(smmu, idx);
Will Deacon43b412b2014-07-15 11:22:24 +01001229 }
Robin Murphy8e8b2032016-09-12 17:13:50 +01001230 return 0;
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001231}
1232
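/*
 * Attach a master to a domain: finalise the domain's context bank on this
 * SMMU if necessary, check that master and domain live on the same SMMU,
 * then steer the master's stream map entries at the context bank.
 */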
Will Deacon45ae7cf2013-06-24 18:31:25 +01001233static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1234{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001235 int ret;
Joerg Roedel9b468f72018-11-29 14:01:00 +01001236 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001237 struct arm_smmu_device *smmu;
Joerg Roedel1d672632015-03-26 13:43:10 +01001238 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001239
Robin Murphyadfec2e2016-09-12 17:13:55 +01001240 if (!fwspec || fwspec->ops != &arm_smmu_ops) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001241 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
1242 return -ENXIO;
1243 }
1244
Robin Murphyfba4f8e2016-10-17 12:06:21 +01001245 /*
1246 * FIXME: The arch/arm DMA API code tries to attach devices to its own
1247 * domains between of_xlate() and add_device() - we have no way to cope
1248 * with that, so until ARM gets converted to rely on groups and default
1249 * domains, just say no (but more politely than by dereferencing NULL).
1250 * This should be at least a WARN_ON once that's sorted.
1251 */
1252 if (!fwspec->iommu_priv)
1253 return -ENODEV;
1254
Robin Murphyadfec2e2016-09-12 17:13:55 +01001255 smmu = fwspec_smmu(fwspec);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301256
1257 ret = arm_smmu_rpm_get(smmu);
1258 if (ret < 0)
1259 return ret;
1260
Will Deacon518f7132014-11-14 17:17:54 +00001261 /* Ensure that the domain is finalised */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001262 ret = arm_smmu_init_domain_context(domain, smmu);
Arnd Bergmann287980e2016-05-27 23:23:25 +02001263 if (ret < 0)
Sricharan Rd4a44f02018-12-04 11:52:10 +05301264 goto rpm_put;
Will Deacon518f7132014-11-14 17:17:54 +00001265
Will Deacon45ae7cf2013-06-24 18:31:25 +01001266 /*
Will Deacon44680ee2014-06-25 11:29:12 +01001267 * Sanity check the domain. We don't support domains across
1268 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01001269 */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001270 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001271 dev_err(dev,
1272 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Robin Murphyadfec2e2016-09-12 17:13:55 +01001273 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
Sricharan Rd4a44f02018-12-04 11:52:10 +05301274 ret = -EINVAL;
1275 goto rpm_put;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001276 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001277
1278 /* Looks ok, so add the device to the domain */
Sricharan Rd4a44f02018-12-04 11:52:10 +05301279 ret = arm_smmu_domain_add_master(smmu_domain, fwspec);
1280
1281rpm_put:
1282 arm_smmu_rpm_put(smmu);
1283 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001284}
1285
Will Deacon45ae7cf2013-06-24 18:31:25 +01001286static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00001287 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001288{
Robin Murphy523d7422017-06-22 16:53:56 +01001289 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301290 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1291 int ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001292
Will Deacon518f7132014-11-14 17:17:54 +00001293 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001294 return -ENODEV;
1295
Sricharan Rd4a44f02018-12-04 11:52:10 +05301296 arm_smmu_rpm_get(smmu);
1297 ret = ops->map(ops, iova, paddr, size, prot);
1298 arm_smmu_rpm_put(smmu);
1299
1300 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001301}
1302
1303static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
Will Deacon56f8af52019-07-02 16:44:06 +01001304 size_t size, struct iommu_iotlb_gather *gather)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001305{
Robin Murphy523d7422017-06-22 16:53:56 +01001306 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301307 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1308 size_t ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001309
Will Deacon518f7132014-11-14 17:17:54 +00001310 if (!ops)
1311 return 0;
1312
Sricharan Rd4a44f02018-12-04 11:52:10 +05301313 arm_smmu_rpm_get(smmu);
1314 ret = ops->unmap(ops, iova, size);
1315 arm_smmu_rpm_put(smmu);
1316
1317 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001318}
1319
Robin Murphy44f68762018-09-20 17:10:27 +01001320static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
1321{
1322 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301323 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy44f68762018-09-20 17:10:27 +01001324
Sricharan Rd4a44f02018-12-04 11:52:10 +05301325 if (smmu_domain->tlb_ops) {
1326 arm_smmu_rpm_get(smmu);
Robin Murphy44f68762018-09-20 17:10:27 +01001327 smmu_domain->tlb_ops->tlb_flush_all(smmu_domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301328 arm_smmu_rpm_put(smmu);
1329 }
Robin Murphy44f68762018-09-20 17:10:27 +01001330}
1331
Will Deacon56f8af52019-07-02 16:44:06 +01001332static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
1333 struct iommu_iotlb_gather *gather)
Robin Murphy32b12442017-09-28 15:55:01 +01001334{
1335 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301336 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy32b12442017-09-28 15:55:01 +01001337
Sricharan Rd4a44f02018-12-04 11:52:10 +05301338 if (smmu_domain->tlb_ops) {
1339 arm_smmu_rpm_get(smmu);
Robin Murphy32b12442017-09-28 15:55:01 +01001340 smmu_domain->tlb_ops->tlb_sync(smmu_domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301341 arm_smmu_rpm_put(smmu);
1342 }
Robin Murphy32b12442017-09-28 15:55:01 +01001343}
1344
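/*
 * Resolve an IOVA by asking the hardware to perform an ATS1 translation
 * through the context bank, falling back to a software table walk if the
 * operation times out.
 */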
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001345static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
1346 dma_addr_t iova)
1347{
Joerg Roedel1d672632015-03-26 13:43:10 +01001348 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001349 struct arm_smmu_device *smmu = smmu_domain->smmu;
1350 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1351	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1352 struct device *dev = smmu->dev;
1353 void __iomem *cb_base;
1354 u32 tmp;
1355 u64 phys;
Robin Murphy523d7422017-06-22 16:53:56 +01001356 unsigned long va, flags;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301357 int ret;
1358
1359 ret = arm_smmu_rpm_get(smmu);
1360 if (ret < 0)
1361 return 0;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001362
Robin Murphy452107c2017-03-30 17:56:30 +01001363 cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001364
Robin Murphy523d7422017-06-22 16:53:56 +01001365 spin_lock_irqsave(&smmu_domain->cb_lock, flags);
Robin Murphy661d9622015-05-27 17:09:34 +01001366 /* ATS1 registers can only be written atomically */
1367 va = iova & ~0xfffUL;
Robin Murphy661d9622015-05-27 17:09:34 +01001368 if (smmu->version == ARM_SMMU_V2)
Robin Murphyf9a05f02016-04-13 18:13:01 +01001369 smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
1370 else /* Register is only 32-bit in v1 */
Robin Murphy661d9622015-05-27 17:09:34 +01001371 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001372
1373 if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
1374 !(tmp & ATSR_ACTIVE), 5, 50)) {
Robin Murphy523d7422017-06-22 16:53:56 +01001375 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001376 dev_err(dev,
Fabio Estevam077124c2015-08-18 17:12:24 +01001377 "iova to phys timed out on %pad. Falling back to software table walk.\n",
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001378 &iova);
		arm_smmu_rpm_put(smmu);
1379		return ops->iova_to_phys(ops, iova);
1380 }
1381
Robin Murphyf9a05f02016-04-13 18:13:01 +01001382 phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
Robin Murphy523d7422017-06-22 16:53:56 +01001383 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001384 if (phys & CB_PAR_F) {
1385 dev_err(dev, "translation fault!\n");
1386 dev_err(dev, "PAR = 0x%llx\n", phys);
		arm_smmu_rpm_put(smmu);
1387		return 0;
1388 }
1389
Sricharan Rd4a44f02018-12-04 11:52:10 +05301390 arm_smmu_rpm_put(smmu);
1391
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001392 return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
1393}
1394
Will Deacon45ae7cf2013-06-24 18:31:25 +01001395static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001396 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001397{
Joerg Roedel1d672632015-03-26 13:43:10 +01001398 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Robin Murphy523d7422017-06-22 16:53:56 +01001399 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001400
Sunil Gouthambdf95922017-04-25 15:27:52 +05301401 if (domain->type == IOMMU_DOMAIN_IDENTITY)
1402 return iova;
1403
Will Deacon518f7132014-11-14 17:17:54 +00001404 if (!ops)
Will Deacona44a97912013-11-07 18:47:50 +00001405 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001406
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001407 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
Robin Murphy523d7422017-06-22 16:53:56 +01001408 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
1409 return arm_smmu_iova_to_phys_hard(domain, iova);
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001410
Robin Murphy523d7422017-06-22 16:53:56 +01001411 return ops->iova_to_phys(ops, iova);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001412}
1413
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001414static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001415{
Will Deacond0948942014-06-24 17:30:10 +01001416 switch (cap) {
1417 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001418 /*
1419 * Return true here as the SMMU can always send out coherent
1420 * requests.
1421 */
1422 return true;
Antonios Motakis0029a8d2014-10-13 14:06:18 +01001423 case IOMMU_CAP_NOEXEC:
1424 return true;
Will Deacond0948942014-06-24 17:30:10 +01001425 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001426 return false;
Will Deacond0948942014-06-24 17:30:10 +01001427 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001428}
Will Deacon45ae7cf2013-06-24 18:31:25 +01001429
Suzuki K Poulose92ce7e82019-06-14 18:54:00 +01001430static int arm_smmu_match_node(struct device *dev, const void *data)
Robin Murphy021bb842016-09-14 15:26:46 +01001431{
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001432 return dev->fwnode == data;
Robin Murphy021bb842016-09-14 15:26:46 +01001433}
1434
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001435static
1436struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
Robin Murphy021bb842016-09-14 15:26:46 +01001437{
1438 struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001439 fwnode, arm_smmu_match_node);
Robin Murphy021bb842016-09-14 15:26:46 +01001440 put_device(dev);
1441 return dev ? dev_get_drvdata(dev) : NULL;
1442}
1443
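/*
 * add_device: look up the owning SMMU (via the legacy "mmu-masters" binding
 * or the device's fwspec), validate every stream ID and mask against the
 * hardware limits, then allocate stream map entries and register the device.
 */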
Will Deacon03edb222015-01-19 14:27:33 +00001444static int arm_smmu_add_device(struct device *dev)
1445{
Robin Murphyadfec2e2016-09-12 17:13:55 +01001446 struct arm_smmu_device *smmu;
Robin Murphyf80cd882016-09-14 15:21:39 +01001447 struct arm_smmu_master_cfg *cfg;
Joerg Roedel9b468f72018-11-29 14:01:00 +01001448 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphyf80cd882016-09-14 15:21:39 +01001449 int i, ret;
1450
Robin Murphy021bb842016-09-14 15:26:46 +01001451 if (using_legacy_binding) {
1452 ret = arm_smmu_register_legacy_master(dev, &smmu);
Artem Savkova7990c62017-08-08 12:26:02 +02001453
1454 /*
1455		 * If dev->iommu_fwspec is initially NULL, arm_smmu_register_legacy_master()
1456 * will allocate/initialise a new one. Thus we need to update fwspec for
1457 * later use.
1458 */
Joerg Roedel9b468f72018-11-29 14:01:00 +01001459 fwspec = dev_iommu_fwspec_get(dev);
Robin Murphy021bb842016-09-14 15:26:46 +01001460 if (ret)
1461 goto out_free;
Robin Murphy3c117b52016-11-02 17:31:32 +00001462 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001463 smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
Robin Murphy021bb842016-09-14 15:26:46 +01001464 } else {
1465 return -ENODEV;
1466 }
Robin Murphyf80cd882016-09-14 15:21:39 +01001467
1468 ret = -EINVAL;
Robin Murphyadfec2e2016-09-12 17:13:55 +01001469 for (i = 0; i < fwspec->num_ids; i++) {
1470 u16 sid = fwspec->ids[i];
Robin Murphy021bb842016-09-14 15:26:46 +01001471 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
Robin Murphyf80cd882016-09-14 15:21:39 +01001472
Robin Murphyadfec2e2016-09-12 17:13:55 +01001473 if (sid & ~smmu->streamid_mask) {
Robin Murphyf80cd882016-09-14 15:21:39 +01001474 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
Robin Murphy021bb842016-09-14 15:26:46 +01001475 sid, smmu->streamid_mask);
1476 goto out_free;
1477 }
1478 if (mask & ~smmu->smr_mask_mask) {
1479 dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
Peng Fan6323f472017-04-21 17:03:36 +08001480 mask, smmu->smr_mask_mask);
Robin Murphyf80cd882016-09-14 15:21:39 +01001481 goto out_free;
1482 }
Robin Murphyf80cd882016-09-14 15:21:39 +01001483 }
Will Deacon03edb222015-01-19 14:27:33 +00001484
Robin Murphyadfec2e2016-09-12 17:13:55 +01001485 ret = -ENOMEM;
1486 cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
1487 GFP_KERNEL);
1488 if (!cfg)
1489 goto out_free;
1490
1491 cfg->smmu = smmu;
1492 fwspec->iommu_priv = cfg;
1493 while (i--)
1494 cfg->smendx[i] = INVALID_SMENDX;
1495
Sricharan Rd4a44f02018-12-04 11:52:10 +05301496 ret = arm_smmu_rpm_get(smmu);
1497 if (ret < 0)
1498 goto out_cfg_free;
1499
Robin Murphy588888a2016-09-12 17:13:54 +01001500 ret = arm_smmu_master_alloc_smes(dev);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301501 arm_smmu_rpm_put(smmu);
1502
Robin Murphyadfec2e2016-09-12 17:13:55 +01001503 if (ret)
Vivek Gautamc54451a2017-07-06 15:07:00 +05301504 goto out_cfg_free;
Robin Murphyadfec2e2016-09-12 17:13:55 +01001505
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001506 iommu_device_link(&smmu->iommu, dev);
1507
Sricharan R655e3642018-12-04 11:52:11 +05301508 device_link_add(dev, smmu->dev,
1509 DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER);
1510
Robin Murphyadfec2e2016-09-12 17:13:55 +01001511 return 0;
Robin Murphyf80cd882016-09-14 15:21:39 +01001512
Vivek Gautamc54451a2017-07-06 15:07:00 +05301513out_cfg_free:
1514 kfree(cfg);
Robin Murphyf80cd882016-09-14 15:21:39 +01001515out_free:
Robin Murphyadfec2e2016-09-12 17:13:55 +01001516 iommu_fwspec_free(dev);
Robin Murphyf80cd882016-09-14 15:21:39 +01001517 return ret;
Will Deacon03edb222015-01-19 14:27:33 +00001518}
1519
Will Deacon45ae7cf2013-06-24 18:31:25 +01001520static void arm_smmu_remove_device(struct device *dev)
1521{
Joerg Roedel9b468f72018-11-29 14:01:00 +01001522 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001523 struct arm_smmu_master_cfg *cfg;
1524 struct arm_smmu_device *smmu;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301525 int ret;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001526
Robin Murphyadfec2e2016-09-12 17:13:55 +01001527 if (!fwspec || fwspec->ops != &arm_smmu_ops)
Robin Murphyf80cd882016-09-14 15:21:39 +01001528 return;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001529
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001530 cfg = fwspec->iommu_priv;
1531 smmu = cfg->smmu;
1532
Sricharan Rd4a44f02018-12-04 11:52:10 +05301533 ret = arm_smmu_rpm_get(smmu);
1534 if (ret < 0)
1535 return;
1536
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001537 iommu_device_unlink(&smmu->iommu, dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001538 arm_smmu_master_free_smes(fwspec);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301539
1540 arm_smmu_rpm_put(smmu);
1541
Antonios Motakis5fc63a72013-10-18 16:08:29 +01001542 iommu_group_remove_device(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001543 kfree(fwspec->iommu_priv);
1544 iommu_fwspec_free(dev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001545}
1546
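/*
 * Devices sharing any stream map entry must end up in the same IOMMU group;
 * reuse an existing group recorded against the S2CRs if there is one,
 * otherwise fall back to the per-bus default group allocation.
 */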
Joerg Roedelaf659932015-10-21 23:51:41 +02001547static struct iommu_group *arm_smmu_device_group(struct device *dev)
1548{
Joerg Roedel9b468f72018-11-29 14:01:00 +01001549 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001550 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
Robin Murphy588888a2016-09-12 17:13:54 +01001551 struct iommu_group *group = NULL;
1552 int i, idx;
1553
Robin Murphyadfec2e2016-09-12 17:13:55 +01001554 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy588888a2016-09-12 17:13:54 +01001555 if (group && smmu->s2crs[idx].group &&
1556 group != smmu->s2crs[idx].group)
1557 return ERR_PTR(-EINVAL);
1558
1559 group = smmu->s2crs[idx].group;
1560 }
1561
1562 if (group)
Robin Murphye1b44cb2016-11-11 17:59:22 +00001563 return iommu_group_ref_get(group);
Joerg Roedelaf659932015-10-21 23:51:41 +02001564
1565 if (dev_is_pci(dev))
1566 group = pci_device_group(dev);
Nipun Guptaeab03e22018-09-10 19:19:18 +05301567 else if (dev_is_fsl_mc(dev))
1568 group = fsl_mc_device_group(dev);
Joerg Roedelaf659932015-10-21 23:51:41 +02001569 else
1570 group = generic_device_group(dev);
1571
Joerg Roedelaf659932015-10-21 23:51:41 +02001572 return group;
1573}
1574
Will Deaconc752ce42014-06-25 22:46:31 +01001575static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
1576 enum iommu_attr attr, void *data)
1577{
Joerg Roedel1d672632015-03-26 13:43:10 +01001578 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001579
Robin Murphy44f68762018-09-20 17:10:27 +01001580 switch(domain->type) {
1581 case IOMMU_DOMAIN_UNMANAGED:
1582 switch (attr) {
1583 case DOMAIN_ATTR_NESTING:
1584 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
1585 return 0;
1586 default:
1587 return -ENODEV;
1588 }
1589 break;
1590 case IOMMU_DOMAIN_DMA:
1591 switch (attr) {
1592 case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
1593 *(int *)data = smmu_domain->non_strict;
1594 return 0;
1595 default:
1596 return -ENODEV;
1597 }
1598 break;
Will Deaconc752ce42014-06-25 22:46:31 +01001599 default:
Robin Murphy44f68762018-09-20 17:10:27 +01001600 return -EINVAL;
Will Deaconc752ce42014-06-25 22:46:31 +01001601 }
1602}
1603
1604static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
1605 enum iommu_attr attr, void *data)
1606{
Will Deacon518f7132014-11-14 17:17:54 +00001607 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01001608 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001609
Will Deacon518f7132014-11-14 17:17:54 +00001610 mutex_lock(&smmu_domain->init_mutex);
1611
Robin Murphy44f68762018-09-20 17:10:27 +01001612 switch(domain->type) {
1613 case IOMMU_DOMAIN_UNMANAGED:
1614 switch (attr) {
1615 case DOMAIN_ATTR_NESTING:
1616 if (smmu_domain->smmu) {
1617 ret = -EPERM;
1618 goto out_unlock;
1619 }
1620
1621 if (*(int *)data)
1622 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1623 else
1624 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1625 break;
1626 default:
1627 ret = -ENODEV;
Will Deacon518f7132014-11-14 17:17:54 +00001628 }
Robin Murphy44f68762018-09-20 17:10:27 +01001629 break;
1630 case IOMMU_DOMAIN_DMA:
1631 switch (attr) {
1632 case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
1633 smmu_domain->non_strict = *(int *)data;
1634 break;
1635 default:
1636 ret = -ENODEV;
1637 }
Will Deacon518f7132014-11-14 17:17:54 +00001638 break;
Will Deaconc752ce42014-06-25 22:46:31 +01001639 default:
Robin Murphy44f68762018-09-20 17:10:27 +01001640 ret = -EINVAL;
Will Deaconc752ce42014-06-25 22:46:31 +01001641 }
Will Deacon518f7132014-11-14 17:17:54 +00001642out_unlock:
1643 mutex_unlock(&smmu_domain->init_mutex);
1644 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01001645}
1646
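/*
 * Translate a generic "iommus" specifier into a fwspec ID: cell 0 is the
 * stream ID, and an optional cell 1 (or the "stream-match-mask" property)
 * is packed into the upper bits as the SMR mask, e.g. <&smmu 0x400 0x3f>.
 */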
Robin Murphy021bb842016-09-14 15:26:46 +01001647static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
1648{
Robin Murphy56fbf602017-03-31 12:03:33 +01001649 u32 mask, fwid = 0;
Robin Murphy021bb842016-09-14 15:26:46 +01001650
1651 if (args->args_count > 0)
1652 fwid |= (u16)args->args[0];
1653
1654 if (args->args_count > 1)
1655 fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
Robin Murphy56fbf602017-03-31 12:03:33 +01001656 else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
1657 fwid |= (u16)mask << SMR_MASK_SHIFT;
Robin Murphy021bb842016-09-14 15:26:46 +01001658
1659 return iommu_fwspec_add_ids(dev, &fwid, 1);
1660}
1661
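/*
 * Reserve the software-managed MSI IOVA window, plus any regions described
 * by firmware, so the DMA layer never hands them out to devices.
 */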
Eric Augerf3ebee82017-01-19 20:57:55 +00001662static void arm_smmu_get_resv_regions(struct device *dev,
1663 struct list_head *head)
1664{
1665 struct iommu_resv_region *region;
1666 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
1667
1668 region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
Robin Murphy9d3a4de2017-03-16 17:00:16 +00001669 prot, IOMMU_RESV_SW_MSI);
Eric Augerf3ebee82017-01-19 20:57:55 +00001670 if (!region)
1671 return;
1672
1673 list_add_tail(&region->list, head);
Robin Murphy273df962017-03-16 17:00:19 +00001674
1675 iommu_dma_get_resv_regions(dev, head);
Eric Augerf3ebee82017-01-19 20:57:55 +00001676}
1677
1678static void arm_smmu_put_resv_regions(struct device *dev,
1679 struct list_head *head)
1680{
1681 struct iommu_resv_region *entry, *next;
1682
1683 list_for_each_entry_safe(entry, next, head, list)
1684 kfree(entry);
1685}
1686
Will Deacon518f7132014-11-14 17:17:54 +00001687static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01001688 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01001689 .domain_alloc = arm_smmu_domain_alloc,
1690 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01001691 .attach_dev = arm_smmu_attach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01001692 .map = arm_smmu_map,
1693 .unmap = arm_smmu_unmap,
Robin Murphy44f68762018-09-20 17:10:27 +01001694 .flush_iotlb_all = arm_smmu_flush_iotlb_all,
Robin Murphy32b12442017-09-28 15:55:01 +01001695 .iotlb_sync = arm_smmu_iotlb_sync,
Will Deaconc752ce42014-06-25 22:46:31 +01001696 .iova_to_phys = arm_smmu_iova_to_phys,
1697 .add_device = arm_smmu_add_device,
1698 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02001699 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01001700 .domain_get_attr = arm_smmu_domain_get_attr,
1701 .domain_set_attr = arm_smmu_domain_set_attr,
Robin Murphy021bb842016-09-14 15:26:46 +01001702 .of_xlate = arm_smmu_of_xlate,
Eric Augerf3ebee82017-01-19 20:57:55 +00001703 .get_resv_regions = arm_smmu_get_resv_regions,
1704 .put_resv_regions = arm_smmu_put_resv_regions,
Will Deacon518f7132014-11-14 17:17:54 +00001705 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001706};
1707
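/*
 * (Re)initialise the global SMMU state: clear stale faults, reset all stream
 * mapping and context bank registers, apply the MMU-500 errata workarounds,
 * invalidate the TLBs and finally configure and enable the global sCR0.
 */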
1708static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
1709{
1710 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001711 int i;
Peng Fan3ca37122016-05-03 21:50:30 +08001712 u32 reg, major;
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001713
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001714 /* clear global FSR */
1715 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
1716 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001717
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001718 /*
1719 * Reset stream mapping groups: Initial values mark all SMRn as
1720 * invalid and all S2CRn as bypass unless overridden.
1721 */
Robin Murphy8e8b2032016-09-12 17:13:50 +01001722 for (i = 0; i < smmu->num_mapping_groups; ++i)
1723 arm_smmu_write_sme(smmu, i);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001724
Nipun Gupta6eb18d42016-11-04 15:25:23 +05301725 if (smmu->model == ARM_MMU500) {
1726 /*
1727		 * Before clearing ARM_MMU500_ACTLR_CPRE, we need to
1728		 * clear the CACHE_LOCK bit of ACR first. Note that the
1729		 * CACHE_LOCK bit is only present in MMU-500 r2 onwards.
1730 */
1731 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
1732 major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
Peng Fan3ca37122016-05-03 21:50:30 +08001733 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
Nipun Gupta6eb18d42016-11-04 15:25:23 +05301734 if (major >= 2)
1735 reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
1736 /*
1737 * Allow unmatched Stream IDs to allocate bypass
1738 * TLB entries for reduced latency.
1739 */
Feng Kan74f55d32017-10-11 15:08:39 -07001740 reg |= ARM_MMU500_ACR_SMTNMB_TLBEN | ARM_MMU500_ACR_S2CRB_TLBEN;
Peng Fan3ca37122016-05-03 21:50:30 +08001741 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
1742 }
1743
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001744 /* Make sure all context banks are disabled and clear CB_FSR */
1745 for (i = 0; i < smmu->num_context_banks; ++i) {
Robin Murphy90df3732017-08-08 14:56:14 +01001746 void __iomem *cb_base = ARM_SMMU_CB(smmu, i);
1747
1748 arm_smmu_write_context_bank(smmu, i);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001749 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01001750 /*
1751 * Disable MMU-500's not-particularly-beneficial next-page
1752 * prefetcher for the sake of errata #841119 and #826419.
1753 */
1754 if (smmu->model == ARM_MMU500) {
1755 reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
1756 reg &= ~ARM_MMU500_ACTLR_CPRE;
1757 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
1758 }
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001759 }
Will Deacon1463fe42013-07-31 19:21:27 +01001760
Will Deacon45ae7cf2013-06-24 18:31:25 +01001761 /* Invalidate the TLB, just in case */
Robin Murphy4e4abae2019-06-03 14:15:37 +02001762 writel_relaxed(QCOM_DUMMY_VAL, gr0_base + ARM_SMMU_GR0_TLBIALLH);
1763 writel_relaxed(QCOM_DUMMY_VAL, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001764
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001765 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001766
Will Deacon45ae7cf2013-06-24 18:31:25 +01001767 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001768 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001769
1770 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001771 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001772
Robin Murphy25a1c962016-02-10 14:25:33 +00001773 /* Enable client access, handling unmatched streams as appropriate */
1774 reg &= ~sCR0_CLIENTPD;
1775 if (disable_bypass)
1776 reg |= sCR0_USFCFG;
1777 else
1778 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001779
1780 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001781 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001782
1783 /* Don't upgrade barriers */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001784 reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001785
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001786 if (smmu->features & ARM_SMMU_FEAT_VMID16)
1787 reg |= sCR0_VMID16EN;
1788
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001789 if (smmu->features & ARM_SMMU_FEAT_EXIDS)
1790 reg |= sCR0_EXIDENABLE;
1791
Will Deacon45ae7cf2013-06-24 18:31:25 +01001792 /* Push the button */
Robin Murphy11febfc2017-03-30 17:56:31 +01001793 arm_smmu_tlb_sync_global(smmu);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001794 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001795}
1796
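/* Decode the 3-bit address size fields from the ID registers into a width in bits */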
1797static int arm_smmu_id_size_to_bits(int size)
1798{
1799 switch (size) {
1800 case 0:
1801 return 32;
1802 case 1:
1803 return 36;
1804 case 2:
1805 return 40;
1806 case 3:
1807 return 42;
1808 case 4:
1809 return 44;
1810 case 5:
1811 default:
1812 return 48;
1813 }
1814}
1815
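/*
 * Read the ID registers to discover what this particular SMMU implements:
 * supported translation stages, stream matching resources, context banks,
 * address sizes and page table formats.
 */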
1816static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1817{
1818 unsigned long size;
1819 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1820 u32 id;
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001821 bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001822 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001823
1824 dev_notice(smmu->dev, "probing hardware configuration...\n");
Robin Murphyb7862e32016-04-13 18:13:03 +01001825 dev_notice(smmu->dev, "SMMUv%d with:\n",
1826 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001827
1828 /* ID0 */
1829 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01001830
1831 /* Restrict available stages based on module parameter */
1832 if (force_stage == 1)
1833 id &= ~(ID0_S2TS | ID0_NTS);
1834 else if (force_stage == 2)
1835 id &= ~(ID0_S1TS | ID0_NTS);
1836
Will Deacon45ae7cf2013-06-24 18:31:25 +01001837 if (id & ID0_S1TS) {
1838 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
1839 dev_notice(smmu->dev, "\tstage 1 translation\n");
1840 }
1841
1842 if (id & ID0_S2TS) {
1843 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
1844 dev_notice(smmu->dev, "\tstage 2 translation\n");
1845 }
1846
1847 if (id & ID0_NTS) {
1848 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
1849 dev_notice(smmu->dev, "\tnested translation\n");
1850 }
1851
1852 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01001853 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001854 dev_err(smmu->dev, "\tno translation support!\n");
1855 return -ENODEV;
1856 }
1857
Robin Murphyb7862e32016-04-13 18:13:03 +01001858 if ((id & ID0_S1TS) &&
1859 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001860 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
1861 dev_notice(smmu->dev, "\taddress translation ops\n");
1862 }
1863
Robin Murphybae2c2d2015-07-29 19:46:05 +01001864 /*
1865 * In order for DMA API calls to work properly, we must defer to what
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001866 * the FW says about coherency, regardless of what the hardware claims.
Robin Murphybae2c2d2015-07-29 19:46:05 +01001867 * Fortunately, this also opens up a workaround for systems where the
1868 * ID register value has ended up configured incorrectly.
1869 */
Robin Murphybae2c2d2015-07-29 19:46:05 +01001870 cttw_reg = !!(id & ID0_CTTW);
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001871 if (cttw_fw || cttw_reg)
Robin Murphybae2c2d2015-07-29 19:46:05 +01001872 dev_notice(smmu->dev, "\t%scoherent table walk\n",
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001873 cttw_fw ? "" : "non-");
1874 if (cttw_fw != cttw_reg)
Robin Murphybae2c2d2015-07-29 19:46:05 +01001875 dev_notice(smmu->dev,
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001876 "\t(IDR0.CTTW overridden by FW configuration)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01001877
Robin Murphy21174242016-09-12 17:13:48 +01001878 /* Max. number of entries we have for stream matching/indexing */
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001879 if (smmu->version == ARM_SMMU_V2 && id & ID0_EXIDS) {
1880 smmu->features |= ARM_SMMU_FEAT_EXIDS;
1881 size = 1 << 16;
1882 } else {
1883 size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
1884 }
Robin Murphy21174242016-09-12 17:13:48 +01001885 smmu->streamid_mask = size - 1;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001886 if (id & ID0_SMS) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001887 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
Robin Murphy21174242016-09-12 17:13:48 +01001888 size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
1889 if (size == 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001890 dev_err(smmu->dev,
1891 "stream-matching supported, but no SMRs present!\n");
1892 return -ENODEV;
1893 }
1894
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001895 /* Zero-initialised to mark as invalid */
1896 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
1897 GFP_KERNEL);
1898 if (!smmu->smrs)
1899 return -ENOMEM;
1900
Will Deacon45ae7cf2013-06-24 18:31:25 +01001901 dev_notice(smmu->dev,
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001902			   "\tstream matching with %lu register groups\n", size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001903 }
Robin Murphy8e8b2032016-09-12 17:13:50 +01001904 /* s2cr->type == 0 means translation, so initialise explicitly */
1905 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
1906 GFP_KERNEL);
1907 if (!smmu->s2crs)
1908 return -ENOMEM;
1909 for (i = 0; i < size; i++)
1910 smmu->s2crs[i] = s2cr_init_val;
1911
Robin Murphy21174242016-09-12 17:13:48 +01001912 smmu->num_mapping_groups = size;
Robin Murphy588888a2016-09-12 17:13:54 +01001913 mutex_init(&smmu->stream_map_mutex);
Will Deacon8e517e72017-07-06 15:55:48 +01001914 spin_lock_init(&smmu->global_sync_lock);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001915
Robin Murphy7602b872016-04-28 17:12:09 +01001916 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
1917 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
1918 if (!(id & ID0_PTFS_NO_AARCH32S))
1919 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
1920 }
1921
Will Deacon45ae7cf2013-06-24 18:31:25 +01001922 /* ID1 */
1923 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01001924 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001925
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01001926 /* Check for size mismatch of SMMU address space from mapped region */
Will Deacon518f7132014-11-14 17:17:54 +00001927 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
Robin Murphy452107c2017-03-30 17:56:30 +01001928 size <<= smmu->pgshift;
1929 if (smmu->cb_base != gr0_base + size)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001930 dev_warn(smmu->dev,
Robin Murphy452107c2017-03-30 17:56:30 +01001931 "SMMU address space size (0x%lx) differs from mapped region size (0x%tx)!\n",
1932 size * 2, (smmu->cb_base - gr0_base) * 2);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001933
Will Deacon518f7132014-11-14 17:17:54 +00001934 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001935 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
1936 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
1937 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
1938 return -ENODEV;
1939 }
1940 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
1941 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphye086d912016-04-13 18:12:58 +01001942 /*
1943 * Cavium CN88xx erratum #27704.
1944 * Ensure ASID and VMID allocation is unique across all SMMUs in
1945 * the system.
1946 */
1947 if (smmu->model == CAVIUM_SMMUV2) {
1948 smmu->cavium_id_base =
1949 atomic_add_return(smmu->num_context_banks,
1950 &cavium_smmu_context_count);
1951 smmu->cavium_id_base -= smmu->num_context_banks;
Robert Richter53c35dce2017-03-13 11:39:01 +01001952 dev_notice(smmu->dev, "\tenabling workaround for Cavium erratum 27704\n");
Robin Murphye086d912016-04-13 18:12:58 +01001953 }
Robin Murphy90df3732017-08-08 14:56:14 +01001954 smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks,
1955 sizeof(*smmu->cbs), GFP_KERNEL);
1956 if (!smmu->cbs)
1957 return -ENOMEM;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001958
1959 /* ID2 */
1960 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
1961 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00001962 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001963
Will Deacon518f7132014-11-14 17:17:54 +00001964 /* The output mask is also applied for bypass */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001965 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00001966 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001967
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001968 if (id & ID2_VMID16)
1969 smmu->features |= ARM_SMMU_FEAT_VMID16;
1970
Robin Murphyf1d84542015-03-04 16:41:05 +00001971 /*
1972 * What the page table walker can address actually depends on which
1973 * descriptor format is in use, but since a) we don't know that yet,
1974 * and b) it can vary per context bank, this will have to do...
1975 */
1976 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
1977 dev_warn(smmu->dev,
1978 "failed to set DMA mask for table walker\n");
1979
Robin Murphyb7862e32016-04-13 18:13:03 +01001980 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00001981 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01001982 if (smmu->version == ARM_SMMU_V1_64K)
1983 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001984 } else {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001985 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
Will Deacon518f7132014-11-14 17:17:54 +00001986 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00001987 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01001988 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00001989 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01001990 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00001991 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01001992 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001993 }
1994
Robin Murphy7602b872016-04-28 17:12:09 +01001995 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01001996 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01001997 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01001998 if (smmu->features &
1999 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01002000 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01002001 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01002002 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01002003 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01002004 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01002005
Robin Murphyd5466352016-05-09 17:20:09 +01002006 if (arm_smmu_ops.pgsize_bitmap == -1UL)
2007 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
2008 else
2009 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
2010 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
2011 smmu->pgsize_bitmap);
2012
Will Deacon28d60072014-09-01 16:24:48 +01002014 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
2015 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
Will Deacon518f7132014-11-14 17:17:54 +00002016 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01002017
2018 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
2019 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
Will Deacon518f7132014-11-14 17:17:54 +00002020 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01002021
Will Deacon45ae7cf2013-06-24 18:31:25 +01002022 return 0;
2023}
2024
Robin Murphy67b65a32016-04-13 18:12:57 +01002025struct arm_smmu_match_data {
2026 enum arm_smmu_arch_version version;
2027 enum arm_smmu_implementation model;
2028};
2029
2030#define ARM_SMMU_MATCH_DATA(name, ver, imp) \
Sricharan R96a299d2018-12-04 11:52:09 +05302031static const struct arm_smmu_match_data name = { .version = ver, .model = imp }
Robin Murphy67b65a32016-04-13 18:12:57 +01002032
2033ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
2034ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
Robin Murphyb7862e32016-04-13 18:13:03 +01002035ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01002036ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
Robin Murphye086d912016-04-13 18:12:58 +01002037ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
Vivek Gautam89cddc52018-12-04 11:52:13 +05302038ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2);
Robin Murphy67b65a32016-04-13 18:12:57 +01002039
Joerg Roedel09b52692014-10-02 12:24:45 +02002040static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01002041 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
2042 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
2043 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01002044 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01002045 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01002046 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Vivek Gautam89cddc52018-12-04 11:52:13 +05302047 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
Robin Murphy09360402014-08-28 17:51:59 +01002048 { },
2049};
Robin Murphy09360402014-08-28 17:51:59 +01002050
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002051#ifdef CONFIG_ACPI
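/* Map an IORT SMMU model code onto the driver's version/implementation pair */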
2052static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
2053{
2054 int ret = 0;
2055
2056 switch (model) {
2057 case ACPI_IORT_SMMU_V1:
2058 case ACPI_IORT_SMMU_CORELINK_MMU400:
2059 smmu->version = ARM_SMMU_V1;
2060 smmu->model = GENERIC_SMMU;
2061 break;
Robin Murphy84c24372017-06-19 16:41:56 +01002062 case ACPI_IORT_SMMU_CORELINK_MMU401:
2063 smmu->version = ARM_SMMU_V1_64K;
2064 smmu->model = GENERIC_SMMU;
2065 break;
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002066 case ACPI_IORT_SMMU_V2:
2067 smmu->version = ARM_SMMU_V2;
2068 smmu->model = GENERIC_SMMU;
2069 break;
2070 case ACPI_IORT_SMMU_CORELINK_MMU500:
2071 smmu->version = ARM_SMMU_V2;
2072 smmu->model = ARM_MMU500;
2073 break;
Robin Murphy84c24372017-06-19 16:41:56 +01002074 case ACPI_IORT_SMMU_CAVIUM_THUNDERX:
2075 smmu->version = ARM_SMMU_V2;
2076 smmu->model = CAVIUM_SMMUV2;
2077 break;
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002078 default:
2079 ret = -ENODEV;
2080 }
2081
2082 return ret;
2083}
2084
2085static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
2086 struct arm_smmu_device *smmu)
2087{
2088 struct device *dev = smmu->dev;
2089 struct acpi_iort_node *node =
2090 *(struct acpi_iort_node **)dev_get_platdata(dev);
2091 struct acpi_iort_smmu *iort_smmu;
2092 int ret;
2093
2094 /* Retrieve SMMU1/2 specific data */
2095 iort_smmu = (struct acpi_iort_smmu *)node->node_data;
2096
2097 ret = acpi_smmu_get_data(iort_smmu->model, smmu);
2098 if (ret < 0)
2099 return ret;
2100
2101 /* Ignore the configuration access interrupt */
2102 smmu->num_global_irqs = 1;
2103
2104 if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
2105 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
2106
2107 return 0;
2108}
2109#else
2110static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
2111 struct arm_smmu_device *smmu)
2112{
2113 return -ENODEV;
2114}
2115#endif
2116
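/*
 * Parse the DT-specific parts of the probe: the global interrupt count, the
 * match data and driver options, and whether the deprecated "mmu-masters"
 * binding is in use (which is mutually exclusive with the generic binding).
 */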
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002117static int arm_smmu_device_dt_probe(struct platform_device *pdev,
2118 struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002119{
Robin Murphy67b65a32016-04-13 18:12:57 +01002120 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002121 struct device *dev = &pdev->dev;
Robin Murphy021bb842016-09-14 15:26:46 +01002122 bool legacy_binding;
2123
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002124 if (of_property_read_u32(dev->of_node, "#global-interrupts",
2125 &smmu->num_global_irqs)) {
2126 dev_err(dev, "missing #global-interrupts property\n");
2127 return -ENODEV;
2128 }
2129
2130 data = of_device_get_match_data(dev);
2131 smmu->version = data->version;
2132 smmu->model = data->model;
2133
2134 parse_driver_options(smmu);
2135
Robin Murphy021bb842016-09-14 15:26:46 +01002136 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
2137 if (legacy_binding && !using_generic_binding) {
2138 if (!using_legacy_binding)
2139 pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
2140 using_legacy_binding = true;
2141 } else if (!legacy_binding && !using_legacy_binding) {
2142 using_generic_binding = true;
2143 } else {
2144 dev_err(dev, "not probing due to mismatched DT properties\n");
2145 return -ENODEV;
2146 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002147
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002148 if (of_dma_is_coherent(dev->of_node))
2149 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
2150
2151 return 0;
2152}
2153
Robin Murphyf6810c12017-04-10 16:51:05 +05302154static void arm_smmu_bus_init(void)
2155{
2156 /* Oh, for a proper bus abstraction */
2157 if (!iommu_present(&platform_bus_type))
2158 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
2159#ifdef CONFIG_ARM_AMBA
2160 if (!iommu_present(&amba_bustype))
2161 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
2162#endif
2163#ifdef CONFIG_PCI
2164 if (!iommu_present(&pci_bus_type)) {
2165 pci_request_acs();
2166 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
2167 }
2168#endif
Nipun Guptaeab03e22018-09-10 19:19:18 +05302169#ifdef CONFIG_FSL_MC_BUS
2170 if (!iommu_present(&fsl_mc_bus_type))
2171 bus_set_iommu(&fsl_mc_bus_type, &arm_smmu_ops);
2172#endif
Robin Murphyf6810c12017-04-10 16:51:05 +05302173}
2174
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002175static int arm_smmu_device_probe(struct platform_device *pdev)
2176{
2177 struct resource *res;
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002178 resource_size_t ioaddr;
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002179 struct arm_smmu_device *smmu;
2180 struct device *dev = &pdev->dev;
2181 int num_irqs, i, err;
2182
Will Deacon45ae7cf2013-06-24 18:31:25 +01002183 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
2184 if (!smmu) {
2185 dev_err(dev, "failed to allocate arm_smmu_device\n");
2186 return -ENOMEM;
2187 }
2188 smmu->dev = dev;
2189
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002190 if (dev->of_node)
2191 err = arm_smmu_device_dt_probe(pdev, smmu);
2192 else
2193 err = arm_smmu_device_acpi_probe(pdev, smmu);
2194
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002195 if (err)
2196 return err;
Robin Murphy09360402014-08-28 17:51:59 +01002197
Will Deacon45ae7cf2013-06-24 18:31:25 +01002198 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002199 ioaddr = res->start;
Julia Lawall8a7f4312013-08-19 12:20:37 +01002200 smmu->base = devm_ioremap_resource(dev, res);
2201 if (IS_ERR(smmu->base))
2202 return PTR_ERR(smmu->base);
Robin Murphy452107c2017-03-30 17:56:30 +01002203 smmu->cb_base = smmu->base + resource_size(res) / 2;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002204
Will Deacon45ae7cf2013-06-24 18:31:25 +01002205 num_irqs = 0;
2206 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
2207 num_irqs++;
2208 if (num_irqs > smmu->num_global_irqs)
2209 smmu->num_context_irqs++;
2210 }
2211
Andreas Herrmann44a08de2013-10-01 13:39:07 +01002212 if (!smmu->num_context_irqs) {
2213 dev_err(dev, "found %d interrupts but expected at least %d\n",
2214 num_irqs, smmu->num_global_irqs + 1);
2215 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002216 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002217
Kees Cooka86854d2018-06-12 14:07:58 -07002218 smmu->irqs = devm_kcalloc(dev, num_irqs, sizeof(*smmu->irqs),
Will Deacon45ae7cf2013-06-24 18:31:25 +01002219 GFP_KERNEL);
2220 if (!smmu->irqs) {
2221 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
2222 return -ENOMEM;
2223 }
2224
2225 for (i = 0; i < num_irqs; ++i) {
2226 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07002227
Will Deacon45ae7cf2013-06-24 18:31:25 +01002228 if (irq < 0) {
2229 dev_err(dev, "failed to get irq index %d\n", i);
2230 return -ENODEV;
2231 }
2232 smmu->irqs[i] = irq;
2233 }
2234
Sricharan R96a299d2018-12-04 11:52:09 +05302235 err = devm_clk_bulk_get_all(dev, &smmu->clks);
2236 if (err < 0) {
2237 dev_err(dev, "failed to get clocks %d\n", err);
2238 return err;
2239 }
2240 smmu->num_clks = err;
2241
2242 err = clk_bulk_prepare_enable(smmu->num_clks, smmu->clks);
2243 if (err)
2244 return err;
2245
Olav Haugan3c8766d2014-08-22 17:12:32 -07002246 err = arm_smmu_device_cfg_probe(smmu);
2247 if (err)
2248 return err;
2249
Vivek Gautamd1e20222018-07-19 23:23:56 +05302250 if (smmu->version == ARM_SMMU_V2) {
2251 if (smmu->num_context_banks > smmu->num_context_irqs) {
2252 dev_err(dev,
2253 "found only %d context irq(s) but %d required\n",
2254 smmu->num_context_irqs, smmu->num_context_banks);
2255 return -ENODEV;
2256 }
2257
2258 /* Ignore superfluous interrupts */
2259 smmu->num_context_irqs = smmu->num_context_banks;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002260 }
2261
Will Deacon45ae7cf2013-06-24 18:31:25 +01002262 for (i = 0; i < smmu->num_global_irqs; ++i) {
Peng Fanbee14002016-07-04 17:38:22 +08002263 err = devm_request_irq(smmu->dev, smmu->irqs[i],
2264 arm_smmu_global_fault,
2265 IRQF_SHARED,
2266 "arm-smmu global fault",
2267 smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002268 if (err) {
2269 dev_err(dev, "failed to request global IRQ %d (%u)\n",
2270 i, smmu->irqs[i]);
Robin Murphyf80cd882016-09-14 15:21:39 +01002271 return err;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002272 }
2273 }
2274
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002275 err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
2276 "smmu.%pa", &ioaddr);
2277 if (err) {
2278 dev_err(dev, "Failed to register iommu in sysfs\n");
2279 return err;
2280 }
2281
2282 iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
2283 iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
2284
2285 err = iommu_device_register(&smmu->iommu);
2286 if (err) {
2287 dev_err(dev, "Failed to register iommu\n");
2288 return err;
2289 }
2290
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002291 platform_set_drvdata(pdev, smmu);
Will Deaconfd90cec2013-08-21 13:56:34 +01002292 arm_smmu_device_reset(smmu);
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03002293 arm_smmu_test_smr_masks(smmu);
Robin Murphy021bb842016-09-14 15:26:46 +01002294
Robin Murphyf6810c12017-04-10 16:51:05 +05302295 /*
Sricharan Rd4a44f02018-12-04 11:52:10 +05302296 * We want to avoid touching dev->power.lock in fastpaths unless
2297 * it's really going to do something useful - pm_runtime_enabled()
2298 * can serve as an ideal proxy for that decision. So, conditionally
2299 * enable pm_runtime.
2300 */
2301 if (dev->pm_domain) {
2302 pm_runtime_set_active(dev);
2303 pm_runtime_enable(dev);
2304 }
2305
2306 /*
Robin Murphyf6810c12017-04-10 16:51:05 +05302307 * For ACPI and generic DT bindings, an SMMU will be probed before
2308 * any device which might need it, so we want the bus ops in place
2309 * ready to handle default domain setup as soon as any SMMU exists.
2310 */
2311 if (!using_legacy_binding)
2312 arm_smmu_bus_init();
2313
Will Deacon45ae7cf2013-06-24 18:31:25 +01002314 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002315}
2316
Robin Murphyf6810c12017-04-10 16:51:05 +05302317/*
2318 * With the legacy DT binding in play, though, we have no guarantees about
2319 * probe order, but then we're also not doing default domains, so we can
2320 * delay setting bus ops until we're sure every possible SMMU is ready,
2321 * and that way ensure that no add_device() calls get missed.
2322 */
2323static int arm_smmu_legacy_bus_init(void)
2324{
2325 if (using_legacy_binding)
2326 arm_smmu_bus_init();
2327 return 0;
2328}
2329device_initcall_sync(arm_smmu_legacy_bus_init);
2330
Paul Gortmakeraddb672f2018-12-01 14:19:16 -05002331static void arm_smmu_device_shutdown(struct platform_device *pdev)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002332{
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002333 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002334
2335 if (!smmu)
Paul Gortmakeraddb672f2018-12-01 14:19:16 -05002336 return;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002337
Will Deaconecfadb62013-07-31 19:21:28 +01002338 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002339 dev_err(&pdev->dev, "removing device with active domains!\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01002340
Sricharan Rd4a44f02018-12-04 11:52:10 +05302341 arm_smmu_rpm_get(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002342 /* Turn the thing off */
Mitchel Humpherys29073202014-07-08 09:52:18 -07002343 writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Sricharan Rd4a44f02018-12-04 11:52:10 +05302344 arm_smmu_rpm_put(smmu);
Sricharan R96a299d2018-12-04 11:52:09 +05302345
Sricharan Rd4a44f02018-12-04 11:52:10 +05302346 if (pm_runtime_enabled(smmu->dev))
2347 pm_runtime_force_suspend(smmu->dev);
2348 else
2349 clk_bulk_disable(smmu->num_clks, smmu->clks);
2350
2351 clk_bulk_unprepare(smmu->num_clks, smmu->clks);
Nate Watterson7aa86192017-06-29 18:18:15 -04002352}
2353
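/*
 * Runtime resume re-enables the SMMU clocks and replays the device reset,
 * since register state cannot be relied upon after the clocks (and possibly
 * the power domain) have been gated.
 */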
Sricharan R96a299d2018-12-04 11:52:09 +05302354static int __maybe_unused arm_smmu_runtime_resume(struct device *dev)
Robin Murphya2d866f2017-08-08 14:56:15 +01002355{
2356 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
Sricharan R96a299d2018-12-04 11:52:09 +05302357 int ret;
2358
2359 ret = clk_bulk_enable(smmu->num_clks, smmu->clks);
2360 if (ret)
2361 return ret;
Robin Murphya2d866f2017-08-08 14:56:15 +01002362
2363 arm_smmu_device_reset(smmu);
Sricharan R96a299d2018-12-04 11:52:09 +05302364
Will Deacon45ae7cf2013-06-24 18:31:25 +01002365 return 0;
2366}
2367
Sricharan R96a299d2018-12-04 11:52:09 +05302368static int __maybe_unused arm_smmu_runtime_suspend(struct device *dev)
Dan Carpenter6614ee72013-08-21 09:34:20 +01002369{
Sricharan R96a299d2018-12-04 11:52:09 +05302370 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
2371
2372 clk_bulk_disable(smmu->num_clks, smmu->clks);
2373
2374 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002375}
2376
Robin Murphya2d866f2017-08-08 14:56:15 +01002377static int __maybe_unused arm_smmu_pm_resume(struct device *dev)
2378{
Sricharan R96a299d2018-12-04 11:52:09 +05302379 if (pm_runtime_suspended(dev))
2380 return 0;
Robin Murphya2d866f2017-08-08 14:56:15 +01002381
Sricharan R96a299d2018-12-04 11:52:09 +05302382 return arm_smmu_runtime_resume(dev);
Robin Murphya2d866f2017-08-08 14:56:15 +01002383}
2384
Sricharan R96a299d2018-12-04 11:52:09 +05302385static int __maybe_unused arm_smmu_pm_suspend(struct device *dev)
2386{
2387 if (pm_runtime_suspended(dev))
2388 return 0;
2389
2390 return arm_smmu_runtime_suspend(dev);
2391}
2392
2393static const struct dev_pm_ops arm_smmu_pm_ops = {
2394 SET_SYSTEM_SLEEP_PM_OPS(arm_smmu_pm_suspend, arm_smmu_pm_resume)
2395 SET_RUNTIME_PM_OPS(arm_smmu_runtime_suspend,
2396 arm_smmu_runtime_resume, NULL)
2397};
Robin Murphya2d866f2017-08-08 14:56:15 +01002398
Will Deacon45ae7cf2013-06-24 18:31:25 +01002399static struct platform_driver arm_smmu_driver = {
2400 .driver = {
Paul Gortmakeraddb672f2018-12-01 14:19:16 -05002401 .name = "arm-smmu",
2402 .of_match_table = of_match_ptr(arm_smmu_of_match),
2403 .pm = &arm_smmu_pm_ops,
2404 .suppress_bind_attrs = true,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002405 },
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002406 .probe = arm_smmu_device_probe,
Nate Watterson7aa86192017-06-29 18:18:15 -04002407 .shutdown = arm_smmu_device_shutdown,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002408};
Paul Gortmakeraddb672f2018-12-01 14:19:16 -05002409builtin_platform_driver(arm_smmu_driver);