// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 *	- Extended Stream ID (16 bit)
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>
#include <linux/fsl/mc.h>

#include "arm-smmu-regs.h"

/*
 * Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU
 * global register space are still, in fact, using a hypervisor to mediate it
 * by trapping and emulating register accesses. Sadly, some deployed versions
 * of said trapping code have bugs wherein they go horribly wrong for stores
 * using r31 (i.e. XZR/WZR) as the source register.
 */
#define QCOM_DUMMY_VAL -1

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)
#define ARM_MMU500_ACR_S2CRB_TLBEN	(1 << 10)
#define ARM_MMU500_ACR_SMTNMB_TLBEN	(1 << 8)

#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */
#define TLB_SPIN_COUNT			10

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif

/* Translation context bank */
#define ARM_SMMU_CB(smmu, n)	((smmu)->cb_base + ((n) << (smmu)->pgshift))

#define MSI_IOVA_BASE			0x8000000
#define MSI_IOVA_LENGTH			0x100000

static int force_stage;
/*
 * not really modular, but the easiest way to keep compat with existing
 * bootargs behaviour is to continue using module_param() here.
 */
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass =
	IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT);
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
	QCOM_SMMUV2,
};

struct arm_smmu_s2cr {
	struct iommu_group		*group;
	int				count;
	enum arm_smmu_s2cr_type		type;
	enum arm_smmu_s2cr_privcfg	privcfg;
	u8				cbndx;
};

#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
}

struct arm_smmu_smr {
	u16				mask;
	u16				id;
	bool				valid;
};

struct arm_smmu_cb {
	u64				ttbr[2];
	u32				tcr[2];
	u32				mair[2];
	struct arm_smmu_cfg		*cfg;
};

struct arm_smmu_master_cfg {
	struct arm_smmu_device		*smmu;
	s16				smendx[];
};
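/*
 * Per-master stream-map bookkeeping lives behind the device's iommu_fwspec:
 * each slot of cfg->smendx[] holds either INVALID_SMENDX or the index of the
 * SMR/S2CR pair claimed for the corresponding fwspec stream ID, and
 * for_each_cfg_sme() below iterates over those (i, idx) pairs.
 */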
#define INVALID_SMENDX			-1
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
#define fwspec_smendx(fw, i) \
	(i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
#define for_each_cfg_sme(fw, i, idx) \
	for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	void __iomem			*cb_base;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
#define ARM_SMMU_FEAT_EXIDS		(1 << 12)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	struct arm_smmu_cb		*cbs;
	atomic_t			irptndx;

	u32				num_mapping_groups;
	u16				streamid_mask;
	u16				smr_mask_mask;
	struct arm_smmu_smr		*smrs;
	struct arm_smmu_s2cr		*s2crs;
	struct mutex			stream_map_mutex;

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;
	struct clk_bulk_data		*clks;
	int				num_clks;

	u32				cavium_id_base; /* Specific to Cavium */

	spinlock_t			global_sync_lock;

	/* IOMMU core code handle */
	struct iommu_device		iommu;
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	union {
		u16			asid;
		u16			vmid;
	};
	u32				cbar;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
	ARM_SMMU_DOMAIN_BYPASS,
};

struct arm_smmu_flush_ops {
	struct iommu_flush_ops		tlb;
	void (*tlb_inv_range)(unsigned long iova, size_t size, size_t granule,
			      bool leaf, void *cookie);
	void (*tlb_sync)(void *cookie);
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	const struct arm_smmu_flush_ops	*flush_ops;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	bool				non_strict;
	struct mutex			init_mutex; /* Protects smmu pointer */
	spinlock_t			cb_lock; /* Serialises ATS1* ops and TLB syncs */
	struct iommu_domain		domain;
};

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static bool using_legacy_binding, using_generic_binding;

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};

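/*
 * Runtime PM helpers: only touch the device when runtime PM is actually
 * enabled for it, so platforms without clocks/power domains behave as before.
 */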
static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
{
	if (pm_runtime_enabled(smmu->dev))
		return pm_runtime_get_sync(smmu->dev);

	return 0;
}

static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
{
	if (pm_runtime_enabled(smmu->dev))
		pm_runtime_put(smmu->dev);
}

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

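/*
 * For the legacy DT binding, PCI masters are described via their host
 * bridge's of_node, so walk up to the root bus; other devices just use
 * their own node.
 */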
static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}

static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", 0)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}

static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;

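/*
 * Legacy "mmu-masters" support: translate the old binding into an
 * iommu_fwspec so the rest of the driver only has to deal with fwspecs.
 */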
static int arm_smmu_register_legacy_master(struct device *dev,
					   struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err;

	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}

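/* Allocate a free index from a shared bitmap (e.g. the context bank map) */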
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
				void __iomem *sync, void __iomem *status)
{
	unsigned int spin_cnt, delay;

	writel_relaxed(QCOM_DUMMY_VAL, sync);
	for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
		for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
			if (!(readl_relaxed(status) & sTLBGSTATUS_GSACTIVE))
				return;
			cpu_relax();
		}
		udelay(delay);
	}
	dev_err_ratelimited(smmu->dev,
			    "TLB sync timed out -- SMMU may be deadlocked\n");
}

static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
{
	void __iomem *base = ARM_SMMU_GR0(smmu);
	unsigned long flags;

	spin_lock_irqsave(&smmu->global_sync_lock, flags);
	__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_GR0_sTLBGSYNC,
			    base + ARM_SMMU_GR0_sTLBGSTATUS);
	spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
}

static void arm_smmu_tlb_sync_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
	unsigned long flags;

	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
	__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_CB_TLBSYNC,
			    base + ARM_SMMU_CB_TLBSTATUS);
	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
}

static void arm_smmu_tlb_sync_vmid(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;

	arm_smmu_tlb_sync_global(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context_s1(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *base = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);

	/*
	 * NOTE: this is not a relaxed write; it needs to guarantee that PTEs
	 * cleared by the current CPU are visible to the SMMU before the TLBI.
	 */
	writel(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
	arm_smmu_tlb_sync_context(cookie);
}

static void arm_smmu_tlb_inv_context_s2(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *base = ARM_SMMU_GR0(smmu);

	/* NOTE: see above */
	writel(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
	arm_smmu_tlb_sync_global(smmu);
}

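/*
 * Invalidate by VA (stage 1) or IPA (stage 2) without waiting for
 * completion; callers are expected to follow up with a TLB sync.
 */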
static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);

	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	if (stage1) {
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			iova &= ~12UL;
			iova |= cfg->asid;
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)cfg->asid << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else {
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	}
}

/*
 * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
 * almost negligible, but the benefit of getting the first one in as far ahead
 * of the sync as possible is significant, hence we don't just make this a
 * no-op and set .tlb_sync to arm_smmu_tlb_inv_context_s2() as you might think.
 */
static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
					 size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	void __iomem *base = ARM_SMMU_GR0(smmu_domain->smmu);

	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
}

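/*
 * The iommu_flush_ops callbacks below wrap the implementation-specific
 * tlb_inv_range/tlb_sync hooks: _walk and _leaf invalidate then sync
 * immediately, while _add_page only queues a single leaf invalidation and
 * leaves the sync to a later flush.
 */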
static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
				  size_t granule, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;

	ops->tlb_inv_range(iova, size, granule, false, cookie);
	ops->tlb_sync(cookie);
}

static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size,
				  size_t granule, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;

	ops->tlb_inv_range(iova, size, granule, true, cookie);
	ops->tlb_sync(cookie);
}

static void arm_smmu_tlb_add_page(unsigned long iova, size_t granule,
				  void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;

	ops->tlb_inv_range(iova, granule, granule, true, cookie);
}

static const struct arm_smmu_flush_ops arm_smmu_s1_tlb_ops = {
	.tlb = {
		.tlb_flush_all	= arm_smmu_tlb_inv_context_s1,
		.tlb_flush_walk	= arm_smmu_tlb_inv_walk,
		.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf,
		.tlb_add_page	= arm_smmu_tlb_add_page,
	},
	.tlb_inv_range		= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync		= arm_smmu_tlb_sync_context,
};

static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
	.tlb = {
		.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
		.tlb_flush_walk	= arm_smmu_tlb_inv_walk,
		.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf,
		.tlb_add_page	= arm_smmu_tlb_add_page,
	},
	.tlb_inv_range		= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync		= arm_smmu_tlb_sync_context,
};

static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
	.tlb = {
		.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
		.tlb_flush_walk	= arm_smmu_tlb_inv_walk,
		.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf,
		.tlb_add_page	= arm_smmu_tlb_add_page,
	},
	.tlb_inv_range		= arm_smmu_tlb_inv_vmid_nosync,
	.tlb_sync		= arm_smmu_tlb_sync_vmid,
};

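/* Context fault handler: report the fault (ratelimited) and clear it */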
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	u32 fsr, fsynr, cbfrsynra;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr1_base = ARM_SMMU_GR1(smmu);
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
	cbfrsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));

	dev_err_ratelimited(smmu->dev,
	"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
			    fsr, iova, fsynr, cbfrsynra, cfg->cbndx);

	writel(fsr, cb_base + ARM_SMMU_CB_FSR);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

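/*
 * Stash the context-bank register values derived from the io-pgtable
 * configuration; arm_smmu_write_context_bank() pushes them to hardware.
 */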
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	cb->cfg = cfg;

	/* TTBCR */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr;
		} else {
			cb->tcr[0] = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
			cb->tcr[1] = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			cb->tcr[1] |= TTBCR2_SEP_UPSTREAM;
			if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
				cb->tcr[1] |= TTBCR2_AS;
		}
	} else {
		cb->tcr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	}

	/* TTBRs */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
			cb->ttbr[1] = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
		} else {
			cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
			cb->ttbr[0] |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
			cb->ttbr[1] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
			cb->ttbr[1] |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
		}
	} else {
		cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr;
			cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr;
		} else {
			cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
			cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		}
	}
}

static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
{
	u32 reg;
	bool stage1;
	struct arm_smmu_cb *cb = &smmu->cbs[idx];
	struct arm_smmu_cfg *cfg = cb->cfg;
	void __iomem *cb_base, *gr1_base;

	cb_base = ARM_SMMU_CB(smmu, idx);

	/* Unassigned context banks only need disabling */
	if (!cfg) {
		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
		return;
	}

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	/* CBA2R */
	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_RW64_64BIT;
		else
			reg = CBA2R_RW64_32BIT;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= cfg->vmid << CBA2R_VMID_SHIFT;

		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(idx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version < ARM_SMMU_V2)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= cfg->vmid << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(idx));

	/*
	 * TTBCR
	 * We must write this before the TTBRs, since it determines the
	 * access behaviour of some fields (in particular, ASID[15:8]).
	 */
	if (stage1 && smmu->version > ARM_SMMU_V1)
		writel_relaxed(cb->tcr[1], cb_base + ARM_SMMU_CB_TTBCR2);
	writel_relaxed(cb->tcr[0], cb_base + ARM_SMMU_CB_TTBCR);

	/* TTBRs */
	if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
		writel_relaxed(cfg->asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
		writel_relaxed(cb->ttbr[0], cb_base + ARM_SMMU_CB_TTBR0);
		writel_relaxed(cb->ttbr[1], cb_base + ARM_SMMU_CB_TTBR1);
	} else {
		writeq_relaxed(cb->ttbr[0], cb_base + ARM_SMMU_CB_TTBR0);
		if (stage1)
			writeq_relaxed(cb->ttbr[1], cb_base + ARM_SMMU_CB_TTBR1);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		writel_relaxed(cb->mair[0], cb_base + ARM_SMMU_CB_S1_MAIR0);
		writel_relaxed(cb->mair[1], cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= SCTLR_E;

	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

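/*
 * Set up a context bank for a domain on first attach: choose a translation
 * stage and context format, allocate a context bank, build the io-pgtable,
 * program the bank and finally request its fault IRQ.
 */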
static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
		smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N              S1
	 *     S1             S1+S2            S1
	 *     S1               S2             S2
	 *     S1               S1             S1
	 *     N                N              N
	 *     N              S1+S2            S2
	 *     N                S2             S2
	 *     N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
	    !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
	    (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		} else {
			fmt = ARM_V7S;
			ias = min(ias, 32UL);
			oas = min(oas, 32UL);
		}
		smmu_domain->flush_ops = &arm_smmu_s1_tlb_ops;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		if (smmu->version == ARM_SMMU_V2)
			smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v2;
		else
			smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v1;
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}
	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (ret < 0)
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
		cfg->vmid = cfg->cbndx + 1 + smmu->cavium_id_base;
	else
		cfg->asid = cfg->cbndx + smmu->cavium_id_base;

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.coherent_walk	= smmu->features & ARM_SMMU_FEAT_COHERENT_WALK,
		.tlb		= &smmu_domain->flush_ops->tlb,
		.iommu_dev	= smmu->dev,
	};

	if (smmu_domain->non_strict)
		pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1UL << ias) - 1;
	domain->geometry.force_aperture = true;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
			       IRQF_SHARED, "arm-smmu-context-fault", domain);
	if (ret < 0) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	int ret, irq;

	if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
		return;

	ret = arm_smmu_rpm_get(smmu);
	if (ret < 0)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	smmu->cbs[cfg->cbndx].cfg = NULL;
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);

	arm_smmu_rpm_put(smmu);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED &&
	    type != IOMMU_DOMAIN_DMA &&
	    type != IOMMU_DOMAIN_IDENTITY)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
	    iommu_get_dma_cookie(&smmu_domain->domain))) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->cb_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_smr *smr = smmu->smrs + idx;
	u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;

	if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
		reg |= SMR_VALID;
	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
}

static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
	u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
		  (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
		  (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;

	if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
	    smmu->smrs[idx].valid)
		reg |= S2CR_EXIDVALID;
	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
}

static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
{
	arm_smmu_write_s2cr(smmu, idx);
	if (smmu->smrs)
		arm_smmu_write_smr(smmu, idx);
}

/*
 * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
 * should be called after sCR0 is written.
 */
static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 smr;

	if (!smmu->smrs)
		return;

	/*
	 * SMR.ID bits may not be preserved if the corresponding MASK
	 * bits are set, so check each one separately. We can reject
	 * masters later if they try to claim IDs outside these masks.
	 */
	smr = smmu->streamid_mask << SMR_ID_SHIFT;
	writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
	smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
	smmu->streamid_mask = smr >> SMR_ID_SHIFT;

	smr = smmu->streamid_mask << SMR_MASK_SHIFT;
	writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
	smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
	smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
}

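/*
 * Find a stream-map entry for the given ID/mask: reuse an existing entry that
 * entirely covers it, otherwise claim a free one, and refuse partial overlaps
 * that could alias another master's streams.
 */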
Robin Murphy588888a2016-09-12 17:13:54 +01001128static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001129{
1130 struct arm_smmu_smr *smrs = smmu->smrs;
Robin Murphy588888a2016-09-12 17:13:54 +01001131 int i, free_idx = -ENOSPC;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001132
Robin Murphy588888a2016-09-12 17:13:54 +01001133 /* Stream indexing is blissfully easy */
1134 if (!smrs)
1135 return id;
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001136
Robin Murphy588888a2016-09-12 17:13:54 +01001137 /* Validating SMRs is... less so */
1138 for (i = 0; i < smmu->num_mapping_groups; ++i) {
1139 if (!smrs[i].valid) {
1140 /*
1141 * Note the first free entry we come across, which
1142 * we'll claim in the end if nothing else matches.
1143 */
1144 if (free_idx < 0)
1145 free_idx = i;
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001146 continue;
1147 }
Robin Murphy588888a2016-09-12 17:13:54 +01001148 /*
1149 * If the new entry is _entirely_ matched by an existing entry,
1150 * then reuse that, with the guarantee that there also cannot
1151 * be any subsequent conflicting entries. In normal use we'd
1152 * expect simply identical entries for this case, but there's
1153 * no harm in accommodating the generalisation.
1154 */
1155 if ((mask & smrs[i].mask) == mask &&
1156 !((id ^ smrs[i].id) & ~smrs[i].mask))
1157 return i;
1158 /*
1159 * If the new entry has any other overlap with an existing one,
1160 * though, then there always exists at least one stream ID
1161 * which would cause a conflict, and we can't allow that risk.
1162 */
1163 if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
1164 return -EINVAL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001165 }
1166
Robin Murphy588888a2016-09-12 17:13:54 +01001167 return free_idx;
1168}
1169
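/*
 * Drop one reference on a stream map entry; returns true once the count hits
 * zero and the entry has been reset, so the caller knows to write the
 * now-invalid entry back to the hardware.
 */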
1170static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
1171{
1172 if (--smmu->s2crs[idx].count)
1173 return false;
1174
1175 smmu->s2crs[idx] = s2cr_init_val;
1176 if (smmu->smrs)
1177 smmu->smrs[idx].valid = false;
1178
1179 return true;
1180}
1181
1182static int arm_smmu_master_alloc_smes(struct device *dev)
1183{
Joerg Roedel9b468f72018-11-29 14:01:00 +01001184 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001185 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy588888a2016-09-12 17:13:54 +01001186 struct arm_smmu_device *smmu = cfg->smmu;
1187 struct arm_smmu_smr *smrs = smmu->smrs;
1188 struct iommu_group *group;
1189 int i, idx, ret;
1190
1191 mutex_lock(&smmu->stream_map_mutex);
1192 /* Figure out a viable stream map entry allocation */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001193 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy021bb842016-09-14 15:26:46 +01001194 u16 sid = fwspec->ids[i];
1195 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
1196
Robin Murphy588888a2016-09-12 17:13:54 +01001197 if (idx != INVALID_SMENDX) {
1198 ret = -EEXIST;
1199 goto out_err;
1200 }
1201
Robin Murphy021bb842016-09-14 15:26:46 +01001202 ret = arm_smmu_find_sme(smmu, sid, mask);
Robin Murphy588888a2016-09-12 17:13:54 +01001203 if (ret < 0)
1204 goto out_err;
1205
1206 idx = ret;
1207 if (smrs && smmu->s2crs[idx].count == 0) {
Robin Murphy021bb842016-09-14 15:26:46 +01001208 smrs[idx].id = sid;
1209 smrs[idx].mask = mask;
Robin Murphy588888a2016-09-12 17:13:54 +01001210 smrs[idx].valid = true;
1211 }
1212 smmu->s2crs[idx].count++;
1213 cfg->smendx[i] = (s16)idx;
1214 }
1215
1216 group = iommu_group_get_for_dev(dev);
1217 if (!group)
1218 group = ERR_PTR(-ENOMEM);
1219 if (IS_ERR(group)) {
1220 ret = PTR_ERR(group);
1221 goto out_err;
1222 }
1223 iommu_group_put(group);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001224
Will Deacon45ae7cf2013-06-24 18:31:25 +01001225 /* It worked! Now, poke the actual hardware */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001226 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy588888a2016-09-12 17:13:54 +01001227 arm_smmu_write_sme(smmu, idx);
1228 smmu->s2crs[idx].group = group;
1229 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001230
Robin Murphy588888a2016-09-12 17:13:54 +01001231 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001232 return 0;
1233
Robin Murphy588888a2016-09-12 17:13:54 +01001234out_err:
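	/* Roll back any stream map entries claimed before the failure */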
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001235 while (i--) {
Robin Murphy588888a2016-09-12 17:13:54 +01001236 arm_smmu_free_sme(smmu, cfg->smendx[i]);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001237 cfg->smendx[i] = INVALID_SMENDX;
1238 }
Robin Murphy588888a2016-09-12 17:13:54 +01001239 mutex_unlock(&smmu->stream_map_mutex);
1240 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001241}
1242
Robin Murphyadfec2e2016-09-12 17:13:55 +01001243static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001244{
Robin Murphyadfec2e2016-09-12 17:13:55 +01001245 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
1246 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphyd3097e32016-09-12 17:13:53 +01001247 int i, idx;
Will Deacon43b412b2014-07-15 11:22:24 +01001248
Robin Murphy588888a2016-09-12 17:13:54 +01001249 mutex_lock(&smmu->stream_map_mutex);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001250 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy588888a2016-09-12 17:13:54 +01001251 if (arm_smmu_free_sme(smmu, idx))
1252 arm_smmu_write_sme(smmu, idx);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001253 cfg->smendx[i] = INVALID_SMENDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001254 }
Robin Murphy588888a2016-09-12 17:13:54 +01001255 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001256}
1257
Will Deacon45ae7cf2013-06-24 18:31:25 +01001258static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Robin Murphyadfec2e2016-09-12 17:13:55 +01001259 struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001260{
Will Deacon44680ee2014-06-25 11:29:12 +01001261 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001262 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001263 u8 cbndx = smmu_domain->cfg.cbndx;
Will Deacon61bc6712017-01-06 16:56:03 +00001264 enum arm_smmu_s2cr_type type;
Robin Murphy588888a2016-09-12 17:13:54 +01001265 int i, idx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001266
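	/*
	 * Bypass domains skip translation; everything else is routed to the
	 * domain's context bank.
	 */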
Will Deacon61bc6712017-01-06 16:56:03 +00001267 if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
1268 type = S2CR_TYPE_BYPASS;
1269 else
1270 type = S2CR_TYPE_TRANS;
1271
Robin Murphyadfec2e2016-09-12 17:13:55 +01001272 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy8e8b2032016-09-12 17:13:50 +01001273 if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
Robin Murphy588888a2016-09-12 17:13:54 +01001274 continue;
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001275
Robin Murphy8e8b2032016-09-12 17:13:50 +01001276 s2cr[idx].type = type;
Sricharan Re1989802017-01-06 18:58:15 +05301277 s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001278 s2cr[idx].cbndx = cbndx;
1279 arm_smmu_write_s2cr(smmu, idx);
Will Deacon43b412b2014-07-15 11:22:24 +01001280 }
Robin Murphy8e8b2032016-09-12 17:13:50 +01001281 return 0;
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001282}
1283
Will Deacon45ae7cf2013-06-24 18:31:25 +01001284static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1285{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001286 int ret;
Joerg Roedel9b468f72018-11-29 14:01:00 +01001287 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001288 struct arm_smmu_device *smmu;
Joerg Roedel1d672632015-03-26 13:43:10 +01001289 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001290
Robin Murphyadfec2e2016-09-12 17:13:55 +01001291 if (!fwspec || fwspec->ops != &arm_smmu_ops) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001292 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
1293 return -ENXIO;
1294 }
1295
Robin Murphyfba4f8e2016-10-17 12:06:21 +01001296 /*
1297 * FIXME: The arch/arm DMA API code tries to attach devices to its own
1298 * domains between of_xlate() and add_device() - we have no way to cope
1299 * with that, so until ARM gets converted to rely on groups and default
1300 * domains, just say no (but more politely than by dereferencing NULL).
1301 * This should be at least a WARN_ON once that's sorted.
1302 */
1303 if (!fwspec->iommu_priv)
1304 return -ENODEV;
1305
Robin Murphyadfec2e2016-09-12 17:13:55 +01001306 smmu = fwspec_smmu(fwspec);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301307
1308 ret = arm_smmu_rpm_get(smmu);
1309 if (ret < 0)
1310 return ret;
1311
Will Deacon518f7132014-11-14 17:17:54 +00001312 /* Ensure that the domain is finalised */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001313 ret = arm_smmu_init_domain_context(domain, smmu);
Arnd Bergmann287980e2016-05-27 23:23:25 +02001314 if (ret < 0)
Sricharan Rd4a44f02018-12-04 11:52:10 +05301315 goto rpm_put;
Will Deacon518f7132014-11-14 17:17:54 +00001316
Will Deacon45ae7cf2013-06-24 18:31:25 +01001317 /*
Will Deacon44680ee2014-06-25 11:29:12 +01001318 * Sanity check the domain. We don't support domains across
1319 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01001320 */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001321 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001322 dev_err(dev,
1323 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Robin Murphyadfec2e2016-09-12 17:13:55 +01001324 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
Sricharan Rd4a44f02018-12-04 11:52:10 +05301325 ret = -EINVAL;
1326 goto rpm_put;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001327 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001328
1329 /* Looks ok, so add the device to the domain */
Sricharan Rd4a44f02018-12-04 11:52:10 +05301330 ret = arm_smmu_domain_add_master(smmu_domain, fwspec);
1331
1332rpm_put:
1333 arm_smmu_rpm_put(smmu);
1334 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001335}
1336
Will Deacon45ae7cf2013-06-24 18:31:25 +01001337static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00001338 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001339{
Robin Murphy523d7422017-06-22 16:53:56 +01001340 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301341 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1342 int ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001343
Will Deacon518f7132014-11-14 17:17:54 +00001344 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001345 return -ENODEV;
1346
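	/* Keep the SMMU powered while its page tables are being updated */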
Sricharan Rd4a44f02018-12-04 11:52:10 +05301347 arm_smmu_rpm_get(smmu);
1348 ret = ops->map(ops, iova, paddr, size, prot);
1349 arm_smmu_rpm_put(smmu);
1350
1351 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001352}
1353
1354static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
Will Deacon56f8af52019-07-02 16:44:06 +01001355 size_t size, struct iommu_iotlb_gather *gather)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001356{
Robin Murphy523d7422017-06-22 16:53:56 +01001357 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301358 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1359 size_t ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001360
Will Deacon518f7132014-11-14 17:17:54 +00001361 if (!ops)
1362 return 0;
1363
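	/* Unmapping may trigger TLB maintenance, so keep the SMMU powered */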
Sricharan Rd4a44f02018-12-04 11:52:10 +05301364 arm_smmu_rpm_get(smmu);
1365 ret = ops->unmap(ops, iova, size);
1366 arm_smmu_rpm_put(smmu);
1367
1368 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001369}
1370
Robin Murphy44f68762018-09-20 17:10:27 +01001371static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
1372{
1373 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301374 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy44f68762018-09-20 17:10:27 +01001375
Will Deaconabfd6fe2019-07-02 16:44:41 +01001376 if (smmu_domain->flush_ops) {
Sricharan Rd4a44f02018-12-04 11:52:10 +05301377 arm_smmu_rpm_get(smmu);
Will Deaconabfd6fe2019-07-02 16:44:41 +01001378 smmu_domain->flush_ops->tlb.tlb_flush_all(smmu_domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301379 arm_smmu_rpm_put(smmu);
1380 }
Robin Murphy44f68762018-09-20 17:10:27 +01001381}
1382
Will Deacon56f8af52019-07-02 16:44:06 +01001383static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
1384 struct iommu_iotlb_gather *gather)
Robin Murphy32b12442017-09-28 15:55:01 +01001385{
1386 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301387 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy32b12442017-09-28 15:55:01 +01001388
Will Deaconabfd6fe2019-07-02 16:44:41 +01001389 if (smmu_domain->flush_ops) {
Sricharan Rd4a44f02018-12-04 11:52:10 +05301390 arm_smmu_rpm_get(smmu);
Will Deacone953f7f2019-07-02 16:44:50 +01001391 smmu_domain->flush_ops->tlb_sync(smmu_domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301392 arm_smmu_rpm_put(smmu);
1393 }
Robin Murphy32b12442017-09-28 15:55:01 +01001394}
1395
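/*
 * Resolve an IOVA by asking the hardware to perform an ATS1PR translation
 * through the context bank, falling back to a software page table walk if
 * the translation does not complete in time.
 */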
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001396static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
1397 dma_addr_t iova)
1398{
Joerg Roedel1d672632015-03-26 13:43:10 +01001399 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001400 struct arm_smmu_device *smmu = smmu_domain->smmu;
1401 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1402	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1403 struct device *dev = smmu->dev;
1404 void __iomem *cb_base;
1405 u32 tmp;
1406 u64 phys;
Robin Murphy523d7422017-06-22 16:53:56 +01001407 unsigned long va, flags;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301408 int ret;
1409
1410 ret = arm_smmu_rpm_get(smmu);
1411 if (ret < 0)
1412 return 0;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001413
Robin Murphy452107c2017-03-30 17:56:30 +01001414 cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001415
Robin Murphy523d7422017-06-22 16:53:56 +01001416 spin_lock_irqsave(&smmu_domain->cb_lock, flags);
Robin Murphy661d9622015-05-27 17:09:34 +01001417 /* ATS1 registers can only be written atomically */
1418 va = iova & ~0xfffUL;
Robin Murphy661d9622015-05-27 17:09:34 +01001419 if (smmu->version == ARM_SMMU_V2)
Robin Murphyf9a05f02016-04-13 18:13:01 +01001420 smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
1421 else /* Register is only 32-bit in v1 */
Robin Murphy661d9622015-05-27 17:09:34 +01001422 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001423
1424 if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
1425 !(tmp & ATSR_ACTIVE), 5, 50)) {
Robin Murphy523d7422017-06-22 16:53:56 +01001426 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001427 dev_err(dev,
Fabio Estevam077124c2015-08-18 17:12:24 +01001428 "iova to phys timed out on %pad. Falling back to software table walk.\n",
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001429 &iova);
		arm_smmu_rpm_put(smmu);
1430		return ops->iova_to_phys(ops, iova);
1431 }
1432
Robin Murphyf9a05f02016-04-13 18:13:01 +01001433 phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
Robin Murphy523d7422017-06-22 16:53:56 +01001434 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001435 if (phys & CB_PAR_F) {
1436 dev_err(dev, "translation fault!\n");
1437 dev_err(dev, "PAR = 0x%llx\n", phys);
		arm_smmu_rpm_put(smmu);
1438		return 0;
1439 }
1440
Sricharan Rd4a44f02018-12-04 11:52:10 +05301441 arm_smmu_rpm_put(smmu);
1442
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001443 return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
1444}
1445
Will Deacon45ae7cf2013-06-24 18:31:25 +01001446static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001447 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001448{
Joerg Roedel1d672632015-03-26 13:43:10 +01001449 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Robin Murphy523d7422017-06-22 16:53:56 +01001450 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001451
Sunil Gouthambdf95922017-04-25 15:27:52 +05301452 if (domain->type == IOMMU_DOMAIN_IDENTITY)
1453 return iova;
1454
Will Deacon518f7132014-11-14 17:17:54 +00001455 if (!ops)
Will Deacona44a97912013-11-07 18:47:50 +00001456 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001457
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001458 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
Robin Murphy523d7422017-06-22 16:53:56 +01001459 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
1460 return arm_smmu_iova_to_phys_hard(domain, iova);
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001461
Robin Murphy523d7422017-06-22 16:53:56 +01001462 return ops->iova_to_phys(ops, iova);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001463}
1464
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001465static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001466{
Will Deacond0948942014-06-24 17:30:10 +01001467 switch (cap) {
1468 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001469 /*
1470 * Return true here as the SMMU can always send out coherent
1471 * requests.
1472 */
1473 return true;
Antonios Motakis0029a8d2014-10-13 14:06:18 +01001474 case IOMMU_CAP_NOEXEC:
1475 return true;
Will Deacond0948942014-06-24 17:30:10 +01001476 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001477 return false;
Will Deacond0948942014-06-24 17:30:10 +01001478 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001479}
Will Deacon45ae7cf2013-06-24 18:31:25 +01001480
Suzuki K Poulose92ce7e82019-06-14 18:54:00 +01001481static int arm_smmu_match_node(struct device *dev, const void *data)
Robin Murphy021bb842016-09-14 15:26:46 +01001482{
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001483 return dev->fwnode == data;
Robin Murphy021bb842016-09-14 15:26:46 +01001484}
1485
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001486static
1487struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
Robin Murphy021bb842016-09-14 15:26:46 +01001488{
1489 struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001490 fwnode, arm_smmu_match_node);
Robin Murphy021bb842016-09-14 15:26:46 +01001491 put_device(dev);
1492 return dev ? dev_get_drvdata(dev) : NULL;
1493}
1494
Will Deacon03edb222015-01-19 14:27:33 +00001495static int arm_smmu_add_device(struct device *dev)
1496{
Robin Murphyadfec2e2016-09-12 17:13:55 +01001497 struct arm_smmu_device *smmu;
Robin Murphyf80cd882016-09-14 15:21:39 +01001498 struct arm_smmu_master_cfg *cfg;
Joerg Roedel9b468f72018-11-29 14:01:00 +01001499 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphyf80cd882016-09-14 15:21:39 +01001500 int i, ret;
1501
Robin Murphy021bb842016-09-14 15:26:46 +01001502 if (using_legacy_binding) {
1503 ret = arm_smmu_register_legacy_master(dev, &smmu);
Artem Savkova7990c62017-08-08 12:26:02 +02001504
1505 /*
1506		 * If dev->iommu_fwspec is initially NULL, arm_smmu_register_legacy_master()
1507 * will allocate/initialise a new one. Thus we need to update fwspec for
1508 * later use.
1509 */
Joerg Roedel9b468f72018-11-29 14:01:00 +01001510 fwspec = dev_iommu_fwspec_get(dev);
Robin Murphy021bb842016-09-14 15:26:46 +01001511 if (ret)
1512 goto out_free;
Robin Murphy3c117b52016-11-02 17:31:32 +00001513 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001514 smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
Robin Murphy021bb842016-09-14 15:26:46 +01001515 } else {
1516 return -ENODEV;
1517 }
Robin Murphyf80cd882016-09-14 15:21:39 +01001518
1519 ret = -EINVAL;
Robin Murphyadfec2e2016-09-12 17:13:55 +01001520 for (i = 0; i < fwspec->num_ids; i++) {
1521 u16 sid = fwspec->ids[i];
Robin Murphy021bb842016-09-14 15:26:46 +01001522 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
Robin Murphyf80cd882016-09-14 15:21:39 +01001523
Robin Murphyadfec2e2016-09-12 17:13:55 +01001524 if (sid & ~smmu->streamid_mask) {
Robin Murphyf80cd882016-09-14 15:21:39 +01001525 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
Robin Murphy021bb842016-09-14 15:26:46 +01001526 sid, smmu->streamid_mask);
1527 goto out_free;
1528 }
1529 if (mask & ~smmu->smr_mask_mask) {
1530 dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
Peng Fan6323f472017-04-21 17:03:36 +08001531 mask, smmu->smr_mask_mask);
Robin Murphyf80cd882016-09-14 15:21:39 +01001532 goto out_free;
1533 }
Robin Murphyf80cd882016-09-14 15:21:39 +01001534 }
Will Deacon03edb222015-01-19 14:27:33 +00001535
Robin Murphyadfec2e2016-09-12 17:13:55 +01001536 ret = -ENOMEM;
1537 cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
1538 GFP_KERNEL);
1539 if (!cfg)
1540 goto out_free;
1541
1542 cfg->smmu = smmu;
1543 fwspec->iommu_priv = cfg;
1544 while (i--)
1545 cfg->smendx[i] = INVALID_SMENDX;
1546
Sricharan Rd4a44f02018-12-04 11:52:10 +05301547 ret = arm_smmu_rpm_get(smmu);
1548 if (ret < 0)
1549 goto out_cfg_free;
1550
Robin Murphy588888a2016-09-12 17:13:54 +01001551 ret = arm_smmu_master_alloc_smes(dev);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301552 arm_smmu_rpm_put(smmu);
1553
Robin Murphyadfec2e2016-09-12 17:13:55 +01001554 if (ret)
Vivek Gautamc54451a2017-07-06 15:07:00 +05301555 goto out_cfg_free;
Robin Murphyadfec2e2016-09-12 17:13:55 +01001556
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001557 iommu_device_link(&smmu->iommu, dev);
1558
Sricharan R655e3642018-12-04 11:52:11 +05301559 device_link_add(dev, smmu->dev,
1560 DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER);
1561
Robin Murphyadfec2e2016-09-12 17:13:55 +01001562 return 0;
Robin Murphyf80cd882016-09-14 15:21:39 +01001563
Vivek Gautamc54451a2017-07-06 15:07:00 +05301564out_cfg_free:
1565 kfree(cfg);
Robin Murphyf80cd882016-09-14 15:21:39 +01001566out_free:
Robin Murphyadfec2e2016-09-12 17:13:55 +01001567 iommu_fwspec_free(dev);
Robin Murphyf80cd882016-09-14 15:21:39 +01001568 return ret;
Will Deacon03edb222015-01-19 14:27:33 +00001569}
1570
Will Deacon45ae7cf2013-06-24 18:31:25 +01001571static void arm_smmu_remove_device(struct device *dev)
1572{
Joerg Roedel9b468f72018-11-29 14:01:00 +01001573 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001574 struct arm_smmu_master_cfg *cfg;
1575 struct arm_smmu_device *smmu;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301576 int ret;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001577
Robin Murphyadfec2e2016-09-12 17:13:55 +01001578 if (!fwspec || fwspec->ops != &arm_smmu_ops)
Robin Murphyf80cd882016-09-14 15:21:39 +01001579 return;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001580
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001581 cfg = fwspec->iommu_priv;
1582 smmu = cfg->smmu;
1583
Sricharan Rd4a44f02018-12-04 11:52:10 +05301584 ret = arm_smmu_rpm_get(smmu);
1585 if (ret < 0)
1586 return;
1587
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001588 iommu_device_unlink(&smmu->iommu, dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001589 arm_smmu_master_free_smes(fwspec);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301590
1591 arm_smmu_rpm_put(smmu);
1592
Antonios Motakis5fc63a72013-10-18 16:08:29 +01001593 iommu_group_remove_device(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001594 kfree(fwspec->iommu_priv);
1595 iommu_fwspec_free(dev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001596}
1597
Joerg Roedelaf659932015-10-21 23:51:41 +02001598static struct iommu_group *arm_smmu_device_group(struct device *dev)
1599{
Joerg Roedel9b468f72018-11-29 14:01:00 +01001600 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001601 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
Robin Murphy588888a2016-09-12 17:13:54 +01001602 struct iommu_group *group = NULL;
1603 int i, idx;
1604
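	/*
	 * Masters sharing a stream map entry must share an IOMMU group, so
	 * reuse any group already associated with one of this device's SMEs.
	 */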
Robin Murphyadfec2e2016-09-12 17:13:55 +01001605 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy588888a2016-09-12 17:13:54 +01001606 if (group && smmu->s2crs[idx].group &&
1607 group != smmu->s2crs[idx].group)
1608 return ERR_PTR(-EINVAL);
1609
1610 group = smmu->s2crs[idx].group;
1611 }
1612
1613 if (group)
Robin Murphye1b44cb2016-11-11 17:59:22 +00001614 return iommu_group_ref_get(group);
Joerg Roedelaf659932015-10-21 23:51:41 +02001615
1616 if (dev_is_pci(dev))
1617 group = pci_device_group(dev);
Nipun Guptaeab03e22018-09-10 19:19:18 +05301618 else if (dev_is_fsl_mc(dev))
1619 group = fsl_mc_device_group(dev);
Joerg Roedelaf659932015-10-21 23:51:41 +02001620 else
1621 group = generic_device_group(dev);
1622
Joerg Roedelaf659932015-10-21 23:51:41 +02001623 return group;
1624}
1625
Will Deaconc752ce42014-06-25 22:46:31 +01001626static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
1627 enum iommu_attr attr, void *data)
1628{
Joerg Roedel1d672632015-03-26 13:43:10 +01001629 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001630
Robin Murphy44f68762018-09-20 17:10:27 +01001631	switch (domain->type) {
1632 case IOMMU_DOMAIN_UNMANAGED:
1633 switch (attr) {
1634 case DOMAIN_ATTR_NESTING:
1635 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
1636 return 0;
1637 default:
1638 return -ENODEV;
1639 }
1640 break;
1641 case IOMMU_DOMAIN_DMA:
1642 switch (attr) {
1643 case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
1644 *(int *)data = smmu_domain->non_strict;
1645 return 0;
1646 default:
1647 return -ENODEV;
1648 }
1649 break;
Will Deaconc752ce42014-06-25 22:46:31 +01001650 default:
Robin Murphy44f68762018-09-20 17:10:27 +01001651 return -EINVAL;
Will Deaconc752ce42014-06-25 22:46:31 +01001652 }
1653}
1654
1655static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
1656 enum iommu_attr attr, void *data)
1657{
Will Deacon518f7132014-11-14 17:17:54 +00001658 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01001659 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001660
Will Deacon518f7132014-11-14 17:17:54 +00001661 mutex_lock(&smmu_domain->init_mutex);
1662
Robin Murphy44f68762018-09-20 17:10:27 +01001663	switch (domain->type) {
1664 case IOMMU_DOMAIN_UNMANAGED:
1665 switch (attr) {
1666 case DOMAIN_ATTR_NESTING:
1667 if (smmu_domain->smmu) {
1668 ret = -EPERM;
1669 goto out_unlock;
1670 }
1671
1672 if (*(int *)data)
1673 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1674 else
1675 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1676 break;
1677 default:
1678 ret = -ENODEV;
Will Deacon518f7132014-11-14 17:17:54 +00001679 }
Robin Murphy44f68762018-09-20 17:10:27 +01001680 break;
1681 case IOMMU_DOMAIN_DMA:
1682 switch (attr) {
1683 case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
1684 smmu_domain->non_strict = *(int *)data;
1685 break;
1686 default:
1687 ret = -ENODEV;
1688 }
Will Deacon518f7132014-11-14 17:17:54 +00001689 break;
Will Deaconc752ce42014-06-25 22:46:31 +01001690 default:
Robin Murphy44f68762018-09-20 17:10:27 +01001691 ret = -EINVAL;
Will Deaconc752ce42014-06-25 22:46:31 +01001692 }
Will Deacon518f7132014-11-14 17:17:54 +00001693out_unlock:
1694 mutex_unlock(&smmu_domain->init_mutex);
1695 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01001696}
1697
Robin Murphy021bb842016-09-14 15:26:46 +01001698static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
1699{
Robin Murphy56fbf602017-03-31 12:03:33 +01001700 u32 mask, fwid = 0;
Robin Murphy021bb842016-09-14 15:26:46 +01001701
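	/* Pack the stream ID into the low 16 bits and the SMR mask above it */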
1702 if (args->args_count > 0)
1703 fwid |= (u16)args->args[0];
1704
1705 if (args->args_count > 1)
1706 fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
Robin Murphy56fbf602017-03-31 12:03:33 +01001707 else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
1708 fwid |= (u16)mask << SMR_MASK_SHIFT;
Robin Murphy021bb842016-09-14 15:26:46 +01001709
1710 return iommu_fwspec_add_ids(dev, &fwid, 1);
1711}
1712
Eric Augerf3ebee82017-01-19 20:57:55 +00001713static void arm_smmu_get_resv_regions(struct device *dev,
1714 struct list_head *head)
1715{
1716 struct iommu_resv_region *region;
1717 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
1718
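	/* Advertise the software-managed MSI IOVA window for this device */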
1719 region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
Robin Murphy9d3a4de2017-03-16 17:00:16 +00001720 prot, IOMMU_RESV_SW_MSI);
Eric Augerf3ebee82017-01-19 20:57:55 +00001721 if (!region)
1722 return;
1723
1724 list_add_tail(&region->list, head);
Robin Murphy273df962017-03-16 17:00:19 +00001725
1726 iommu_dma_get_resv_regions(dev, head);
Eric Augerf3ebee82017-01-19 20:57:55 +00001727}
1728
1729static void arm_smmu_put_resv_regions(struct device *dev,
1730 struct list_head *head)
1731{
1732 struct iommu_resv_region *entry, *next;
1733
1734 list_for_each_entry_safe(entry, next, head, list)
1735 kfree(entry);
1736}
1737
Will Deacon518f7132014-11-14 17:17:54 +00001738static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01001739 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01001740 .domain_alloc = arm_smmu_domain_alloc,
1741 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01001742 .attach_dev = arm_smmu_attach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01001743 .map = arm_smmu_map,
1744 .unmap = arm_smmu_unmap,
Robin Murphy44f68762018-09-20 17:10:27 +01001745 .flush_iotlb_all = arm_smmu_flush_iotlb_all,
Robin Murphy32b12442017-09-28 15:55:01 +01001746 .iotlb_sync = arm_smmu_iotlb_sync,
Will Deaconc752ce42014-06-25 22:46:31 +01001747 .iova_to_phys = arm_smmu_iova_to_phys,
1748 .add_device = arm_smmu_add_device,
1749 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02001750 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01001751 .domain_get_attr = arm_smmu_domain_get_attr,
1752 .domain_set_attr = arm_smmu_domain_set_attr,
Robin Murphy021bb842016-09-14 15:26:46 +01001753 .of_xlate = arm_smmu_of_xlate,
Eric Augerf3ebee82017-01-19 20:57:55 +00001754 .get_resv_regions = arm_smmu_get_resv_regions,
1755 .put_resv_regions = arm_smmu_put_resv_regions,
Will Deacon518f7132014-11-14 17:17:54 +00001756 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001757};
1758
1759static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
1760{
1761 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001762 int i;
Peng Fan3ca37122016-05-03 21:50:30 +08001763 u32 reg, major;
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001764
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001765 /* clear global FSR */
1766 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
1767 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001768
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001769 /*
1770 * Reset stream mapping groups: Initial values mark all SMRn as
1771 * invalid and all S2CRn as bypass unless overridden.
1772 */
Robin Murphy8e8b2032016-09-12 17:13:50 +01001773 for (i = 0; i < smmu->num_mapping_groups; ++i)
1774 arm_smmu_write_sme(smmu, i);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001775
Nipun Gupta6eb18d42016-11-04 15:25:23 +05301776 if (smmu->model == ARM_MMU500) {
1777 /*
1778 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
1779 * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
1780 * bit is only present in MMU-500r2 onwards.
1781 */
1782 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
1783 major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
Peng Fan3ca37122016-05-03 21:50:30 +08001784 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
Nipun Gupta6eb18d42016-11-04 15:25:23 +05301785 if (major >= 2)
1786 reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
1787 /*
1788 * Allow unmatched Stream IDs to allocate bypass
1789 * TLB entries for reduced latency.
1790 */
Feng Kan74f55d32017-10-11 15:08:39 -07001791 reg |= ARM_MMU500_ACR_SMTNMB_TLBEN | ARM_MMU500_ACR_S2CRB_TLBEN;
Peng Fan3ca37122016-05-03 21:50:30 +08001792 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
1793 }
1794
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001795 /* Make sure all context banks are disabled and clear CB_FSR */
1796 for (i = 0; i < smmu->num_context_banks; ++i) {
Robin Murphy90df3732017-08-08 14:56:14 +01001797 void __iomem *cb_base = ARM_SMMU_CB(smmu, i);
1798
1799 arm_smmu_write_context_bank(smmu, i);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001800 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01001801 /*
1802 * Disable MMU-500's not-particularly-beneficial next-page
1803 * prefetcher for the sake of errata #841119 and #826419.
1804 */
1805 if (smmu->model == ARM_MMU500) {
1806 reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
1807 reg &= ~ARM_MMU500_ACTLR_CPRE;
1808 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
1809 }
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001810 }
Will Deacon1463fe42013-07-31 19:21:27 +01001811
Will Deacon45ae7cf2013-06-24 18:31:25 +01001812 /* Invalidate the TLB, just in case */
Robin Murphy4e4abae2019-06-03 14:15:37 +02001813 writel_relaxed(QCOM_DUMMY_VAL, gr0_base + ARM_SMMU_GR0_TLBIALLH);
1814 writel_relaxed(QCOM_DUMMY_VAL, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001815
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001816 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001817
Will Deacon45ae7cf2013-06-24 18:31:25 +01001818 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001819 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001820
1821 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001822 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001823
Robin Murphy25a1c962016-02-10 14:25:33 +00001824 /* Enable client access, handling unmatched streams as appropriate */
1825 reg &= ~sCR0_CLIENTPD;
1826 if (disable_bypass)
1827 reg |= sCR0_USFCFG;
1828 else
1829 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001830
1831 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001832 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001833
1834 /* Don't upgrade barriers */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001835 reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001836
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001837 if (smmu->features & ARM_SMMU_FEAT_VMID16)
1838 reg |= sCR0_VMID16EN;
1839
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001840 if (smmu->features & ARM_SMMU_FEAT_EXIDS)
1841 reg |= sCR0_EXIDENABLE;
1842
Will Deacon45ae7cf2013-06-24 18:31:25 +01001843 /* Push the button */
Robin Murphy11febfc2017-03-30 17:56:31 +01001844 arm_smmu_tlb_sync_global(smmu);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001845 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001846}
1847
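/* Convert an address-size field encoding from the ID registers into bits */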
1848static int arm_smmu_id_size_to_bits(int size)
1849{
1850 switch (size) {
1851 case 0:
1852 return 32;
1853 case 1:
1854 return 36;
1855 case 2:
1856 return 40;
1857 case 3:
1858 return 42;
1859 case 4:
1860 return 44;
1861 case 5:
1862 default:
1863 return 48;
1864 }
1865}
1866
1867static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1868{
1869 unsigned long size;
1870 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1871 u32 id;
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001872 bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001873 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001874
1875 dev_notice(smmu->dev, "probing hardware configuration...\n");
Robin Murphyb7862e32016-04-13 18:13:03 +01001876 dev_notice(smmu->dev, "SMMUv%d with:\n",
1877 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001878
1879 /* ID0 */
1880 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01001881
1882 /* Restrict available stages based on module parameter */
1883 if (force_stage == 1)
1884 id &= ~(ID0_S2TS | ID0_NTS);
1885 else if (force_stage == 2)
1886 id &= ~(ID0_S1TS | ID0_NTS);
1887
Will Deacon45ae7cf2013-06-24 18:31:25 +01001888 if (id & ID0_S1TS) {
1889 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
1890 dev_notice(smmu->dev, "\tstage 1 translation\n");
1891 }
1892
1893 if (id & ID0_S2TS) {
1894 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
1895 dev_notice(smmu->dev, "\tstage 2 translation\n");
1896 }
1897
1898 if (id & ID0_NTS) {
1899 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
1900 dev_notice(smmu->dev, "\tnested translation\n");
1901 }
1902
1903 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01001904 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001905 dev_err(smmu->dev, "\tno translation support!\n");
1906 return -ENODEV;
1907 }
1908
Robin Murphyb7862e32016-04-13 18:13:03 +01001909 if ((id & ID0_S1TS) &&
1910 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001911 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
1912 dev_notice(smmu->dev, "\taddress translation ops\n");
1913 }
1914
Robin Murphybae2c2d2015-07-29 19:46:05 +01001915 /*
1916 * In order for DMA API calls to work properly, we must defer to what
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001917 * the FW says about coherency, regardless of what the hardware claims.
Robin Murphybae2c2d2015-07-29 19:46:05 +01001918 * Fortunately, this also opens up a workaround for systems where the
1919 * ID register value has ended up configured incorrectly.
1920 */
Robin Murphybae2c2d2015-07-29 19:46:05 +01001921 cttw_reg = !!(id & ID0_CTTW);
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001922 if (cttw_fw || cttw_reg)
Robin Murphybae2c2d2015-07-29 19:46:05 +01001923 dev_notice(smmu->dev, "\t%scoherent table walk\n",
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001924 cttw_fw ? "" : "non-");
1925 if (cttw_fw != cttw_reg)
Robin Murphybae2c2d2015-07-29 19:46:05 +01001926 dev_notice(smmu->dev,
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001927 "\t(IDR0.CTTW overridden by FW configuration)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01001928
Robin Murphy21174242016-09-12 17:13:48 +01001929 /* Max. number of entries we have for stream matching/indexing */
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001930 if (smmu->version == ARM_SMMU_V2 && id & ID0_EXIDS) {
1931 smmu->features |= ARM_SMMU_FEAT_EXIDS;
1932 size = 1 << 16;
1933 } else {
1934 size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
1935 }
Robin Murphy21174242016-09-12 17:13:48 +01001936 smmu->streamid_mask = size - 1;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001937 if (id & ID0_SMS) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001938 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
Robin Murphy21174242016-09-12 17:13:48 +01001939 size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
1940 if (size == 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001941 dev_err(smmu->dev,
1942 "stream-matching supported, but no SMRs present!\n");
1943 return -ENODEV;
1944 }
1945
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001946 /* Zero-initialised to mark as invalid */
1947 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
1948 GFP_KERNEL);
1949 if (!smmu->smrs)
1950 return -ENOMEM;
1951
Will Deacon45ae7cf2013-06-24 18:31:25 +01001952 dev_notice(smmu->dev,
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001953		   "\tstream matching with %lu register groups\n", size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001954 }
Robin Murphy8e8b2032016-09-12 17:13:50 +01001955 /* s2cr->type == 0 means translation, so initialise explicitly */
1956 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
1957 GFP_KERNEL);
1958 if (!smmu->s2crs)
1959 return -ENOMEM;
1960 for (i = 0; i < size; i++)
1961 smmu->s2crs[i] = s2cr_init_val;
1962
Robin Murphy21174242016-09-12 17:13:48 +01001963 smmu->num_mapping_groups = size;
Robin Murphy588888a2016-09-12 17:13:54 +01001964 mutex_init(&smmu->stream_map_mutex);
Will Deacon8e517e72017-07-06 15:55:48 +01001965 spin_lock_init(&smmu->global_sync_lock);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001966
Robin Murphy7602b872016-04-28 17:12:09 +01001967 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
1968 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
1969 if (!(id & ID0_PTFS_NO_AARCH32S))
1970 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
1971 }
1972
Will Deacon45ae7cf2013-06-24 18:31:25 +01001973 /* ID1 */
1974 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01001975 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001976
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01001977 /* Check for size mismatch of SMMU address space from mapped region */
Will Deacon518f7132014-11-14 17:17:54 +00001978 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
Robin Murphy452107c2017-03-30 17:56:30 +01001979 size <<= smmu->pgshift;
1980 if (smmu->cb_base != gr0_base + size)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001981 dev_warn(smmu->dev,
Robin Murphy452107c2017-03-30 17:56:30 +01001982 "SMMU address space size (0x%lx) differs from mapped region size (0x%tx)!\n",
1983 size * 2, (smmu->cb_base - gr0_base) * 2);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001984
Will Deacon518f7132014-11-14 17:17:54 +00001985 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001986 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
1987 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
1988 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
1989 return -ENODEV;
1990 }
1991 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
1992 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphye086d912016-04-13 18:12:58 +01001993 /*
1994 * Cavium CN88xx erratum #27704.
1995 * Ensure ASID and VMID allocation is unique across all SMMUs in
1996 * the system.
1997 */
1998 if (smmu->model == CAVIUM_SMMUV2) {
1999 smmu->cavium_id_base =
2000 atomic_add_return(smmu->num_context_banks,
2001 &cavium_smmu_context_count);
2002 smmu->cavium_id_base -= smmu->num_context_banks;
Robert Richter53c35dce2017-03-13 11:39:01 +01002003 dev_notice(smmu->dev, "\tenabling workaround for Cavium erratum 27704\n");
Robin Murphye086d912016-04-13 18:12:58 +01002004 }
Robin Murphy90df3732017-08-08 14:56:14 +01002005 smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks,
2006 sizeof(*smmu->cbs), GFP_KERNEL);
2007 if (!smmu->cbs)
2008 return -ENOMEM;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002009
2010 /* ID2 */
2011 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
2012 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00002013 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002014
Will Deacon518f7132014-11-14 17:17:54 +00002015 /* The output mask is also applied for bypass */
Will Deacon45ae7cf2013-06-24 18:31:25 +01002016 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00002017 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002018
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08002019 if (id & ID2_VMID16)
2020 smmu->features |= ARM_SMMU_FEAT_VMID16;
2021
Robin Murphyf1d84542015-03-04 16:41:05 +00002022 /*
2023 * What the page table walker can address actually depends on which
2024 * descriptor format is in use, but since a) we don't know that yet,
2025 * and b) it can vary per context bank, this will have to do...
2026 */
2027 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
2028 dev_warn(smmu->dev,
2029 "failed to set DMA mask for table walker\n");
2030
Robin Murphyb7862e32016-04-13 18:13:03 +01002031 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00002032 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01002033 if (smmu->version == ARM_SMMU_V1_64K)
2034 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002035 } else {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002036 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
Will Deacon518f7132014-11-14 17:17:54 +00002037 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00002038 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01002039 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00002040 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01002041 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00002042 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01002043 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002044 }
2045
Robin Murphy7602b872016-04-28 17:12:09 +01002046 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01002047 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01002048 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01002049 if (smmu->features &
2050 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01002051 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01002052 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01002053 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01002054 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01002055 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01002056
Robin Murphyd5466352016-05-09 17:20:09 +01002057 if (arm_smmu_ops.pgsize_bitmap == -1UL)
2058 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
2059 else
2060 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
2061 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
2062 smmu->pgsize_bitmap);
2063
Will Deacon518f7132014-11-14 17:17:54 +00002064
Will Deacon28d60072014-09-01 16:24:48 +01002065 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
2066 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
Will Deacon518f7132014-11-14 17:17:54 +00002067 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01002068
2069 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
2070 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
Will Deacon518f7132014-11-14 17:17:54 +00002071 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01002072
Will Deacon45ae7cf2013-06-24 18:31:25 +01002073 return 0;
2074}
2075
Robin Murphy67b65a32016-04-13 18:12:57 +01002076struct arm_smmu_match_data {
2077 enum arm_smmu_arch_version version;
2078 enum arm_smmu_implementation model;
2079};
2080
2081#define ARM_SMMU_MATCH_DATA(name, ver, imp) \
Sricharan R96a299d2018-12-04 11:52:09 +05302082static const struct arm_smmu_match_data name = { .version = ver, .model = imp }
Robin Murphy67b65a32016-04-13 18:12:57 +01002083
2084ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
2085ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
Robin Murphyb7862e32016-04-13 18:13:03 +01002086ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01002087ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
Robin Murphye086d912016-04-13 18:12:58 +01002088ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
Vivek Gautam89cddc52018-12-04 11:52:13 +05302089ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2);
Robin Murphy67b65a32016-04-13 18:12:57 +01002090
Joerg Roedel09b52692014-10-02 12:24:45 +02002091static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01002092 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
2093 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
2094 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01002095 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01002096 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01002097 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Vivek Gautam89cddc52018-12-04 11:52:13 +05302098 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
Robin Murphy09360402014-08-28 17:51:59 +01002099 { },
2100};
Robin Murphy09360402014-08-28 17:51:59 +01002101
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002102#ifdef CONFIG_ACPI
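/* Map the IORT node's model field onto our version/implementation enums */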
2103static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
2104{
2105 int ret = 0;
2106
2107 switch (model) {
2108 case ACPI_IORT_SMMU_V1:
2109 case ACPI_IORT_SMMU_CORELINK_MMU400:
2110 smmu->version = ARM_SMMU_V1;
2111 smmu->model = GENERIC_SMMU;
2112 break;
Robin Murphy84c24372017-06-19 16:41:56 +01002113 case ACPI_IORT_SMMU_CORELINK_MMU401:
2114 smmu->version = ARM_SMMU_V1_64K;
2115 smmu->model = GENERIC_SMMU;
2116 break;
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002117 case ACPI_IORT_SMMU_V2:
2118 smmu->version = ARM_SMMU_V2;
2119 smmu->model = GENERIC_SMMU;
2120 break;
2121 case ACPI_IORT_SMMU_CORELINK_MMU500:
2122 smmu->version = ARM_SMMU_V2;
2123 smmu->model = ARM_MMU500;
2124 break;
Robin Murphy84c24372017-06-19 16:41:56 +01002125 case ACPI_IORT_SMMU_CAVIUM_THUNDERX:
2126 smmu->version = ARM_SMMU_V2;
2127 smmu->model = CAVIUM_SMMUV2;
2128 break;
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002129 default:
2130 ret = -ENODEV;
2131 }
2132
2133 return ret;
2134}
2135
2136static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
2137 struct arm_smmu_device *smmu)
2138{
2139 struct device *dev = smmu->dev;
2140 struct acpi_iort_node *node =
2141 *(struct acpi_iort_node **)dev_get_platdata(dev);
2142 struct acpi_iort_smmu *iort_smmu;
2143 int ret;
2144
2145 /* Retrieve SMMU1/2 specific data */
2146 iort_smmu = (struct acpi_iort_smmu *)node->node_data;
2147
2148 ret = acpi_smmu_get_data(iort_smmu->model, smmu);
2149 if (ret < 0)
2150 return ret;
2151
2152 /* Ignore the configuration access interrupt */
2153 smmu->num_global_irqs = 1;
2154
2155 if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
2156 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
2157
2158 return 0;
2159}
2160#else
2161static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
2162 struct arm_smmu_device *smmu)
2163{
2164 return -ENODEV;
2165}
2166#endif
2167
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002168static int arm_smmu_device_dt_probe(struct platform_device *pdev,
2169 struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002170{
Robin Murphy67b65a32016-04-13 18:12:57 +01002171 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002172 struct device *dev = &pdev->dev;
Robin Murphy021bb842016-09-14 15:26:46 +01002173 bool legacy_binding;
2174
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002175 if (of_property_read_u32(dev->of_node, "#global-interrupts",
2176 &smmu->num_global_irqs)) {
2177 dev_err(dev, "missing #global-interrupts property\n");
2178 return -ENODEV;
2179 }
2180
2181 data = of_device_get_match_data(dev);
2182 smmu->version = data->version;
2183 smmu->model = data->model;
2184
2185 parse_driver_options(smmu);
2186
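	/* The legacy "mmu-masters" binding and the generic one are mutually exclusive */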
Robin Murphy021bb842016-09-14 15:26:46 +01002187 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
2188 if (legacy_binding && !using_generic_binding) {
2189 if (!using_legacy_binding)
2190 pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
2191 using_legacy_binding = true;
2192 } else if (!legacy_binding && !using_legacy_binding) {
2193 using_generic_binding = true;
2194 } else {
2195 dev_err(dev, "not probing due to mismatched DT properties\n");
2196 return -ENODEV;
2197 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002198
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002199 if (of_dma_is_coherent(dev->of_node))
2200 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
2201
2202 return 0;
2203}
2204
Robin Murphyf6810c12017-04-10 16:51:05 +05302205static void arm_smmu_bus_init(void)
2206{
2207 /* Oh, for a proper bus abstraction */
2208 if (!iommu_present(&platform_bus_type))
2209 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
2210#ifdef CONFIG_ARM_AMBA
2211 if (!iommu_present(&amba_bustype))
2212 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
2213#endif
2214#ifdef CONFIG_PCI
2215 if (!iommu_present(&pci_bus_type)) {
2216 pci_request_acs();
2217 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
2218 }
2219#endif
Nipun Guptaeab03e22018-09-10 19:19:18 +05302220#ifdef CONFIG_FSL_MC_BUS
2221 if (!iommu_present(&fsl_mc_bus_type))
2222 bus_set_iommu(&fsl_mc_bus_type, &arm_smmu_ops);
2223#endif
Robin Murphyf6810c12017-04-10 16:51:05 +05302224}
2225
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002226static int arm_smmu_device_probe(struct platform_device *pdev)
2227{
2228 struct resource *res;
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002229 resource_size_t ioaddr;
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002230 struct arm_smmu_device *smmu;
2231 struct device *dev = &pdev->dev;
2232 int num_irqs, i, err;
2233
Will Deacon45ae7cf2013-06-24 18:31:25 +01002234 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
2235 if (!smmu) {
2236 dev_err(dev, "failed to allocate arm_smmu_device\n");
2237 return -ENOMEM;
2238 }
2239 smmu->dev = dev;
2240
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002241 if (dev->of_node)
2242 err = arm_smmu_device_dt_probe(pdev, smmu);
2243 else
2244 err = arm_smmu_device_acpi_probe(pdev, smmu);
2245
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002246 if (err)
2247 return err;
Robin Murphy09360402014-08-28 17:51:59 +01002248
Will Deacon45ae7cf2013-06-24 18:31:25 +01002249 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002250 ioaddr = res->start;
Julia Lawall8a7f4312013-08-19 12:20:37 +01002251 smmu->base = devm_ioremap_resource(dev, res);
2252 if (IS_ERR(smmu->base))
2253 return PTR_ERR(smmu->base);
Robin Murphy452107c2017-03-30 17:56:30 +01002254 smmu->cb_base = smmu->base + resource_size(res) / 2;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002255
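	/* IRQs beyond the global fault ones are context bank interrupts */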
Will Deacon45ae7cf2013-06-24 18:31:25 +01002256 num_irqs = 0;
2257 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
2258 num_irqs++;
2259 if (num_irqs > smmu->num_global_irqs)
2260 smmu->num_context_irqs++;
2261 }
2262
Andreas Herrmann44a08de2013-10-01 13:39:07 +01002263 if (!smmu->num_context_irqs) {
2264 dev_err(dev, "found %d interrupts but expected at least %d\n",
2265 num_irqs, smmu->num_global_irqs + 1);
2266 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002267 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002268
Kees Cooka86854d2018-06-12 14:07:58 -07002269 smmu->irqs = devm_kcalloc(dev, num_irqs, sizeof(*smmu->irqs),
Will Deacon45ae7cf2013-06-24 18:31:25 +01002270 GFP_KERNEL);
2271 if (!smmu->irqs) {
2272 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
2273 return -ENOMEM;
2274 }
2275
2276 for (i = 0; i < num_irqs; ++i) {
2277 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07002278
Will Deacon45ae7cf2013-06-24 18:31:25 +01002279 if (irq < 0) {
2280 dev_err(dev, "failed to get irq index %d\n", i);
2281 return -ENODEV;
2282 }
2283 smmu->irqs[i] = irq;
2284 }
2285
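	/* Acquire and enable the SMMU's clocks before touching any registers */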
Sricharan R96a299d2018-12-04 11:52:09 +05302286 err = devm_clk_bulk_get_all(dev, &smmu->clks);
2287 if (err < 0) {
2288 dev_err(dev, "failed to get clocks %d\n", err);
2289 return err;
2290 }
2291 smmu->num_clks = err;
2292
2293 err = clk_bulk_prepare_enable(smmu->num_clks, smmu->clks);
2294 if (err)
2295 return err;
2296
Olav Haugan3c8766d2014-08-22 17:12:32 -07002297 err = arm_smmu_device_cfg_probe(smmu);
2298 if (err)
2299 return err;
2300
Vivek Gautamd1e20222018-07-19 23:23:56 +05302301 if (smmu->version == ARM_SMMU_V2) {
2302 if (smmu->num_context_banks > smmu->num_context_irqs) {
2303 dev_err(dev,
2304 "found only %d context irq(s) but %d required\n",
2305 smmu->num_context_irqs, smmu->num_context_banks);
2306 return -ENODEV;
2307 }
2308
2309 /* Ignore superfluous interrupts */
2310 smmu->num_context_irqs = smmu->num_context_banks;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002311 }
2312
Will Deacon45ae7cf2013-06-24 18:31:25 +01002313 for (i = 0; i < smmu->num_global_irqs; ++i) {
Peng Fanbee14002016-07-04 17:38:22 +08002314 err = devm_request_irq(smmu->dev, smmu->irqs[i],
2315 arm_smmu_global_fault,
2316 IRQF_SHARED,
2317 "arm-smmu global fault",
2318 smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002319 if (err) {
2320 dev_err(dev, "failed to request global IRQ %d (%u)\n",
2321 i, smmu->irqs[i]);
Robin Murphyf80cd882016-09-14 15:21:39 +01002322 return err;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002323 }
2324 }
2325
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002326 err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
2327 "smmu.%pa", &ioaddr);
2328 if (err) {
2329 dev_err(dev, "Failed to register iommu in sysfs\n");
2330 return err;
2331 }
2332
2333 iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
2334 iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
2335
2336 err = iommu_device_register(&smmu->iommu);
2337 if (err) {
2338 dev_err(dev, "Failed to register iommu\n");
2339 return err;
2340 }
2341
	platform_set_drvdata(pdev, smmu);
	arm_smmu_device_reset(smmu);
	arm_smmu_test_smr_masks(smmu);

	/*
	 * We want to avoid touching dev->power.lock in fastpaths unless
	 * it's really going to do something useful - pm_runtime_enabled()
	 * can serve as an ideal proxy for that decision. So, conditionally
	 * enable pm_runtime.
	 */
	if (dev->pm_domain) {
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
	}

	/*
	 * For ACPI and generic DT bindings, an SMMU will be probed before
	 * any device which might need it, so we want the bus ops in place
	 * ready to handle default domain setup as soon as any SMMU exists.
	 */
	if (!using_legacy_binding)
		arm_smmu_bus_init();

	return 0;
}

/*
 * With the legacy DT binding in play, though, we have no guarantees about
 * probe order, but then we're also not doing default domains, so we can
 * delay setting bus ops until we're sure every possible SMMU is ready,
 * and that way ensure that no add_device() calls get missed.
 */
static int arm_smmu_legacy_bus_init(void)
{
	if (using_legacy_binding)
		arm_smmu_bus_init();
	return 0;
}
device_initcall_sync(arm_smmu_legacy_bus_init);

static void arm_smmu_device_shutdown(struct platform_device *pdev)
{
	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);

	if (!smmu)
		return;

	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_err(&pdev->dev, "removing device with active domains!\n");

	arm_smmu_rpm_get(smmu);
	/* Turn the thing off */
	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
	arm_smmu_rpm_put(smmu);

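	/*
	 * Balance the clock enable taken at probe: let runtime PM do it if
	 * it is managing the device, otherwise drop the reference directly.
	 */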
	if (pm_runtime_enabled(smmu->dev))
		pm_runtime_force_suspend(smmu->dev);
	else
		clk_bulk_disable(smmu->num_clks, smmu->clks);

	clk_bulk_unprepare(smmu->num_clks, smmu->clks);
}

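/*
 * Runtime PM simply gates the SMMU clocks: suspend drops the clock
 * references, and resume reprograms the global and context-bank registers
 * from the driver's software state via arm_smmu_device_reset().
 */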
static int __maybe_unused arm_smmu_runtime_resume(struct device *dev)
{
	struct arm_smmu_device *smmu = dev_get_drvdata(dev);
	int ret;

	ret = clk_bulk_enable(smmu->num_clks, smmu->clks);
	if (ret)
		return ret;

	arm_smmu_device_reset(smmu);

	return 0;
}

static int __maybe_unused arm_smmu_runtime_suspend(struct device *dev)
{
	struct arm_smmu_device *smmu = dev_get_drvdata(dev);

	clk_bulk_disable(smmu->num_clks, smmu->clks);

	return 0;
}

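/*
 * System sleep reuses the runtime PM callbacks; a device that is already
 * runtime-suspended has nothing further to do.
 */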
static int __maybe_unused arm_smmu_pm_resume(struct device *dev)
{
	if (pm_runtime_suspended(dev))
		return 0;

	return arm_smmu_runtime_resume(dev);
}

static int __maybe_unused arm_smmu_pm_suspend(struct device *dev)
{
	if (pm_runtime_suspended(dev))
		return 0;

	return arm_smmu_runtime_suspend(dev);
}

static const struct dev_pm_ops arm_smmu_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(arm_smmu_pm_suspend, arm_smmu_pm_resume)
	SET_RUNTIME_PM_OPS(arm_smmu_runtime_suspend,
			   arm_smmu_runtime_resume, NULL)
};

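/*
 * The driver is built in and provides no .remove callback; unbinding is
 * suppressed because live client domains cannot be safely torn down
 * underneath the devices still using them.
 */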
static struct platform_driver arm_smmu_driver = {
	.driver = {
		.name			= "arm-smmu",
		.of_match_table		= of_match_ptr(arm_smmu_of_match),
		.pm			= &arm_smmu_pm_ops,
		.suppress_bind_attrs	= true,
	},
	.probe	= arm_smmu_device_probe,
	.shutdown = arm_smmu_device_shutdown,
};
builtin_platform_driver(arm_smmu_driver);