// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 *	- Extended Stream ID (16 bit)
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>
#include <linux/fsl/mc.h>

#include "arm-smmu-regs.h"

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)
#define ARM_MMU500_ACR_S2CRB_TLBEN	(1 << 10)
#define ARM_MMU500_ACR_SMTNMB_TLBEN	(1 << 8)

#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */
#define TLB_SPIN_COUNT			10

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif

/* Translation context bank */
#define ARM_SMMU_CB(smmu, n)	((smmu)->cb_base + ((n) << (smmu)->pgshift))

#define MSI_IOVA_BASE			0x8000000
#define MSI_IOVA_LENGTH			0x100000

static int force_stage;
/*
 * not really modular, but the easiest way to keep compat with existing
 * bootargs behaviour is to continue using module_param() here.
 */
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass =
	IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT);
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
	QCOM_SMMUV2,
};

struct arm_smmu_s2cr {
	struct iommu_group		*group;
	int				count;
	enum arm_smmu_s2cr_type		type;
	enum arm_smmu_s2cr_privcfg	privcfg;
	u8				cbndx;
};

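/*
 * Reset value for a stream-to-context register: fault the transaction if
 * bypass was disabled on the command line, otherwise let it bypass
 * translation untouched.
 */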
#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
}

struct arm_smmu_smr {
	u16				mask;
	u16				id;
	bool				valid;
};

struct arm_smmu_cb {
	u64				ttbr[2];
	u32				tcr[2];
	u32				mair[2];
	struct arm_smmu_cfg		*cfg;
};

struct arm_smmu_master_cfg {
	struct arm_smmu_device		*smmu;
	s16				smendx[];
};
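
/*
 * Each ID recorded in a master's iommu_fwspec packs the Stream ID in its
 * low 16 bits; on stream-matching SMMUs the upper bits carry the SMR mask
 * (see arm_smmu_master_alloc_smes()). The helpers below translate fwspec
 * IDs into stream map entry (SME) indices.
 */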
#define INVALID_SMENDX			-1
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
#define fwspec_smendx(fw, i) \
	(i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
#define for_each_cfg_sme(fw, i, idx) \
	for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	void __iomem			*cb_base;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
#define ARM_SMMU_FEAT_EXIDS		(1 << 12)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	struct arm_smmu_cb		*cbs;
	atomic_t			irptndx;

	u32				num_mapping_groups;
	u16				streamid_mask;
	u16				smr_mask_mask;
	struct arm_smmu_smr		*smrs;
	struct arm_smmu_s2cr		*s2crs;
	struct mutex			stream_map_mutex;

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;
	struct clk_bulk_data		*clks;
	int				num_clks;

	u32				cavium_id_base; /* Specific to Cavium */

	spinlock_t			global_sync_lock;

	/* IOMMU core code handle */
	struct iommu_device		iommu;
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	union {
		u16			asid;
		u16			vmid;
	};
	u32				cbar;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
	ARM_SMMU_DOMAIN_BYPASS,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	const struct iommu_gather_ops	*tlb_ops;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	bool				non_strict;
	struct mutex			init_mutex; /* Protects smmu pointer */
	spinlock_t			cb_lock; /* Serialises ATS1* ops and TLB syncs */
	struct iommu_domain		domain;
};

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static bool using_legacy_binding, using_generic_binding;

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};

static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
{
	if (pm_runtime_enabled(smmu->dev))
		return pm_runtime_get_sync(smmu->dev);

	return 0;
}

static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
{
	if (pm_runtime_enabled(smmu->dev))
		pm_runtime_put(smmu->dev);
}

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}

static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", 0)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}

static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;

static int arm_smmu_register_legacy_master(struct device *dev,
					   struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err;

	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}

static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
				void __iomem *sync, void __iomem *status)
{
	unsigned int spin_cnt, delay;

	writel_relaxed(0, sync);
	for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
		for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
			if (!(readl_relaxed(status) & sTLBGSTATUS_GSACTIVE))
				return;
			cpu_relax();
		}
		udelay(delay);
	}
	dev_err_ratelimited(smmu->dev,
			    "TLB sync timed out -- SMMU may be deadlocked\n");
}

static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
{
	void __iomem *base = ARM_SMMU_GR0(smmu);
	unsigned long flags;

	spin_lock_irqsave(&smmu->global_sync_lock, flags);
	__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_GR0_sTLBGSYNC,
			    base + ARM_SMMU_GR0_sTLBGSTATUS);
	spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
}

static void arm_smmu_tlb_sync_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
	unsigned long flags;

	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
	__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_CB_TLBSYNC,
			    base + ARM_SMMU_CB_TLBSTATUS);
	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
}

static void arm_smmu_tlb_sync_vmid(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;

	arm_smmu_tlb_sync_global(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context_s1(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *base = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);

	/*
	 * NOTE: this is not a relaxed write; it needs to guarantee that PTEs
	 * cleared by the current CPU are visible to the SMMU before the TLBI.
	 */
	writel(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
	arm_smmu_tlb_sync_context(cookie);
}

static void arm_smmu_tlb_inv_context_s2(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *base = ARM_SMMU_GR0(smmu);

	/* NOTE: see above */
	writel(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
	arm_smmu_tlb_sync_global(smmu);
}

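/*
 * Invalidate a range of IOVAs one granule at a time. For stage 1 the ASID
 * is folded into the TLBIVA(L) address (low bits for the AArch32 formats,
 * bits [63:48] for AArch64); stage 2 invalidates by IPA via TLBIIPAS2(L).
 */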
static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);

	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	if (stage1) {
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			iova &= ~12UL;
			iova |= cfg->asid;
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)cfg->asid << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else {
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	}
}

/*
 * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
 * almost negligible, but the benefit of getting the first one in as far ahead
 * of the sync as possible is significant, hence we don't just make this a
 * no-op and set .tlb_sync to arm_smmu_inv_context_s2() as you might think.
 */
static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
					 size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	void __iomem *base = ARM_SMMU_GR0(smmu_domain->smmu);

	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
}

static const struct iommu_gather_ops arm_smmu_s1_tlb_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s1,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync_context,
};

static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v2 = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync_context,
};

static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v1 = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
	.tlb_add_flush	= arm_smmu_tlb_inv_vmid_nosync,
	.tlb_sync	= arm_smmu_tlb_sync_vmid,
};

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	u32 fsr, fsynr, cbfrsynra;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr1_base = ARM_SMMU_GR1(smmu);
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
	cbfrsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));

	dev_err_ratelimited(smmu->dev,
	"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
			    fsr, iova, fsynr, cbfrsynra, cfg->cbndx);

	writel(fsr, cb_base + ARM_SMMU_CB_FSR);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

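/*
 * Context-bank register values are staged in the shadow struct arm_smmu_cb
 * here; arm_smmu_write_context_bank() below is what actually commits them
 * to the hardware.
 */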
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	cb->cfg = cfg;

	/* TTBCR */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr;
		} else {
			cb->tcr[0] = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
			cb->tcr[1] = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			cb->tcr[1] |= TTBCR2_SEP_UPSTREAM;
			if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
				cb->tcr[1] |= TTBCR2_AS;
		}
	} else {
		cb->tcr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	}

	/* TTBRs */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
			cb->ttbr[1] = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
		} else {
			cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
			cb->ttbr[0] |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
			cb->ttbr[1] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
			cb->ttbr[1] |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
		}
	} else {
		cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr;
			cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr;
		} else {
			cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
			cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		}
	}
}

static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
{
	u32 reg;
	bool stage1;
	struct arm_smmu_cb *cb = &smmu->cbs[idx];
	struct arm_smmu_cfg *cfg = cb->cfg;
	void __iomem *cb_base, *gr1_base;

	cb_base = ARM_SMMU_CB(smmu, idx);

	/* Unassigned context banks only need disabling */
	if (!cfg) {
		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
		return;
	}

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	/* CBA2R */
	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_RW64_64BIT;
		else
			reg = CBA2R_RW64_32BIT;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= cfg->vmid << CBA2R_VMID_SHIFT;

		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(idx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version < ARM_SMMU_V2)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= cfg->vmid << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(idx));

	/*
	 * TTBCR
	 * We must write this before the TTBRs, since it determines the
	 * access behaviour of some fields (in particular, ASID[15:8]).
	 */
	if (stage1 && smmu->version > ARM_SMMU_V1)
		writel_relaxed(cb->tcr[1], cb_base + ARM_SMMU_CB_TTBCR2);
	writel_relaxed(cb->tcr[0], cb_base + ARM_SMMU_CB_TTBCR);

	/* TTBRs */
	if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
		writel_relaxed(cfg->asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
		writel_relaxed(cb->ttbr[0], cb_base + ARM_SMMU_CB_TTBR0);
		writel_relaxed(cb->ttbr[1], cb_base + ARM_SMMU_CB_TTBR1);
	} else {
		writeq_relaxed(cb->ttbr[0], cb_base + ARM_SMMU_CB_TTBR0);
		if (stage1)
			writeq_relaxed(cb->ttbr[1], cb_base + ARM_SMMU_CB_TTBR1);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		writel_relaxed(cb->mair[0], cb_base + ARM_SMMU_CB_S1_MAIR0);
		writel_relaxed(cb->mair[1], cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= SCTLR_E;

	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

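/*
 * Finish initialising a domain on its first attach: resolve the translation
 * stage and table format, allocate a context bank plus ASID/VMID, build the
 * io-pgtable, program the context bank and hook up its fault interrupt.
 */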
static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
		smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N              S1
	 *     S1             S1+S2            S1
	 *     S1               S2             S2
	 *     S1               S1             S1
	 *     N                N              N
	 *     N              S1+S2            S2
	 *     N                S2             S2
	 *     N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
	    !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
	    (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		} else {
			fmt = ARM_V7S;
			ias = min(ias, 32UL);
			oas = min(oas, 32UL);
		}
		smmu_domain->tlb_ops = &arm_smmu_s1_tlb_ops;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		if (smmu->version == ARM_SMMU_V2)
			smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v2;
		else
			smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v1;
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}
	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (ret < 0)
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

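	/*
	 * ASIDs and VMIDs are derived from the context bank index;
	 * cavium_id_base (zero on other implementations) offsets them so the
	 * values stay unique across Cavium SMMU instances, which would
	 * otherwise alias in the shared TLB.
	 */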
	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
		cfg->vmid = cfg->cbndx + 1 + smmu->cavium_id_base;
	else
		cfg->asid = cfg->cbndx + smmu->cavium_id_base;

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= smmu_domain->tlb_ops,
		.iommu_dev	= smmu->dev,
	};

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA;

	if (smmu_domain->non_strict)
		pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1UL << ias) - 1;
	domain->geometry.force_aperture = true;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
			       IRQF_SHARED, "arm-smmu-context-fault", domain);
	if (ret < 0) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	int ret, irq;

	if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
		return;

	ret = arm_smmu_rpm_get(smmu);
	if (ret < 0)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	smmu->cbs[cfg->cbndx].cfg = NULL;
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);

	arm_smmu_rpm_put(smmu);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED &&
	    type != IOMMU_DOMAIN_DMA &&
	    type != IOMMU_DOMAIN_IDENTITY)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
	    iommu_get_dma_cookie(&smmu_domain->domain))) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->cb_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_smr *smr = smmu->smrs + idx;
	u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;

	if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
		reg |= SMR_VALID;
	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
}

static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
	u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
		  (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
		  (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;

	if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
	    smmu->smrs[idx].valid)
		reg |= S2CR_EXIDVALID;
	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
}

static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
{
	arm_smmu_write_s2cr(smmu, idx);
	if (smmu->smrs)
		arm_smmu_write_smr(smmu, idx);
}

/*
 * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
 * should be called after sCR0 is written.
 */
static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 smr;

	if (!smmu->smrs)
		return;

	/*
	 * SMR.ID bits may not be preserved if the corresponding MASK
	 * bits are set, so check each one separately. We can reject
	 * masters later if they try to claim IDs outside these masks.
	 */
	smr = smmu->streamid_mask << SMR_ID_SHIFT;
	writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
	smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
	smmu->streamid_mask = smr >> SMR_ID_SHIFT;

	smr = smmu->streamid_mask << SMR_MASK_SHIFT;
	writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
	smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
	smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
}

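/*
 * Find a stream map entry for the given Stream ID/mask. With stream
 * indexing the ID selects the S2CR directly; with stream matching we
 * either reuse an SMR that already covers the ID or claim a free one,
 * rejecting partial overlaps that could match ambiguously.
 */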
static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
{
	struct arm_smmu_smr *smrs = smmu->smrs;
	int i, free_idx = -ENOSPC;

	/* Stream indexing is blissfully easy */
	if (!smrs)
		return id;

	/* Validating SMRs is... less so */
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		if (!smrs[i].valid) {
			/*
			 * Note the first free entry we come across, which
			 * we'll claim in the end if nothing else matches.
			 */
			if (free_idx < 0)
				free_idx = i;
			continue;
		}
		/*
		 * If the new entry is _entirely_ matched by an existing entry,
		 * then reuse that, with the guarantee that there also cannot
		 * be any subsequent conflicting entries. In normal use we'd
		 * expect simply identical entries for this case, but there's
		 * no harm in accommodating the generalisation.
		 */
		if ((mask & smrs[i].mask) == mask &&
		    !((id ^ smrs[i].id) & ~smrs[i].mask))
			return i;
		/*
		 * If the new entry has any other overlap with an existing one,
		 * though, then there always exists at least one stream ID
		 * which would cause a conflict, and we can't allow that risk.
		 */
		if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
			return -EINVAL;
	}

	return free_idx;
}

static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
{
	if (--smmu->s2crs[idx].count)
		return false;

	smmu->s2crs[idx] = s2cr_init_val;
	if (smmu->smrs)
		smmu->smrs[idx].valid = false;

	return true;
}

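/*
 * Allocate (or reuse) stream map entries for each of a master's Stream IDs
 * and program the SMR/S2CR pairs, all under stream_map_mutex; on failure,
 * any entries claimed so far are released again.
 */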
1124static int arm_smmu_master_alloc_smes(struct device *dev)
1125{
Joerg Roedel9b468f72018-11-29 14:01:00 +01001126 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001127 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy588888a2016-09-12 17:13:54 +01001128 struct arm_smmu_device *smmu = cfg->smmu;
1129 struct arm_smmu_smr *smrs = smmu->smrs;
1130 struct iommu_group *group;
1131 int i, idx, ret;
1132
1133 mutex_lock(&smmu->stream_map_mutex);
1134 /* Figure out a viable stream map entry allocation */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001135 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy021bb842016-09-14 15:26:46 +01001136 u16 sid = fwspec->ids[i];
1137 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
1138
Robin Murphy588888a2016-09-12 17:13:54 +01001139 if (idx != INVALID_SMENDX) {
1140 ret = -EEXIST;
1141 goto out_err;
1142 }
1143
Robin Murphy021bb842016-09-14 15:26:46 +01001144 ret = arm_smmu_find_sme(smmu, sid, mask);
Robin Murphy588888a2016-09-12 17:13:54 +01001145 if (ret < 0)
1146 goto out_err;
1147
1148 idx = ret;
1149 if (smrs && smmu->s2crs[idx].count == 0) {
Robin Murphy021bb842016-09-14 15:26:46 +01001150 smrs[idx].id = sid;
1151 smrs[idx].mask = mask;
Robin Murphy588888a2016-09-12 17:13:54 +01001152 smrs[idx].valid = true;
1153 }
1154 smmu->s2crs[idx].count++;
1155 cfg->smendx[i] = (s16)idx;
1156 }
1157
1158 group = iommu_group_get_for_dev(dev);
1159 if (!group)
1160 group = ERR_PTR(-ENOMEM);
1161 if (IS_ERR(group)) {
1162 ret = PTR_ERR(group);
1163 goto out_err;
1164 }
1165 iommu_group_put(group);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001166
Will Deacon45ae7cf2013-06-24 18:31:25 +01001167 /* It worked! Now, poke the actual hardware */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001168 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy588888a2016-09-12 17:13:54 +01001169 arm_smmu_write_sme(smmu, idx);
1170 smmu->s2crs[idx].group = group;
1171 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001172
Robin Murphy588888a2016-09-12 17:13:54 +01001173 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001174 return 0;
1175
Robin Murphy588888a2016-09-12 17:13:54 +01001176out_err:
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001177 while (i--) {
Robin Murphy588888a2016-09-12 17:13:54 +01001178 arm_smmu_free_sme(smmu, cfg->smendx[i]);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001179 cfg->smendx[i] = INVALID_SMENDX;
1180 }
Robin Murphy588888a2016-09-12 17:13:54 +01001181 mutex_unlock(&smmu->stream_map_mutex);
1182 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001183}
1184
Robin Murphyadfec2e2016-09-12 17:13:55 +01001185static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001186{
Robin Murphyadfec2e2016-09-12 17:13:55 +01001187 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
1188 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphyd3097e32016-09-12 17:13:53 +01001189 int i, idx;
Will Deacon43b412b2014-07-15 11:22:24 +01001190
Robin Murphy588888a2016-09-12 17:13:54 +01001191 mutex_lock(&smmu->stream_map_mutex);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001192 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy588888a2016-09-12 17:13:54 +01001193 if (arm_smmu_free_sme(smmu, idx))
1194 arm_smmu_write_sme(smmu, idx);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001195 cfg->smendx[i] = INVALID_SMENDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001196 }
Robin Murphy588888a2016-09-12 17:13:54 +01001197 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001198}
1199
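/*
 * Point each of the master's S2CRs at the domain's context bank, or set
 * them to bypass when the domain stage is ARM_SMMU_DOMAIN_BYPASS, skipping
 * entries that are already configured as required (e.g. shared SMEs).
 */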
Will Deacon45ae7cf2013-06-24 18:31:25 +01001200static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Robin Murphyadfec2e2016-09-12 17:13:55 +01001201 struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001202{
Will Deacon44680ee2014-06-25 11:29:12 +01001203 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001204 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001205 u8 cbndx = smmu_domain->cfg.cbndx;
Will Deacon61bc6712017-01-06 16:56:03 +00001206 enum arm_smmu_s2cr_type type;
Robin Murphy588888a2016-09-12 17:13:54 +01001207 int i, idx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001208
Will Deacon61bc6712017-01-06 16:56:03 +00001209 if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
1210 type = S2CR_TYPE_BYPASS;
1211 else
1212 type = S2CR_TYPE_TRANS;
1213
Robin Murphyadfec2e2016-09-12 17:13:55 +01001214 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy8e8b2032016-09-12 17:13:50 +01001215 if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
Robin Murphy588888a2016-09-12 17:13:54 +01001216 continue;
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001217
Robin Murphy8e8b2032016-09-12 17:13:50 +01001218 s2cr[idx].type = type;
Sricharan Re1989802017-01-06 18:58:15 +05301219 s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001220 s2cr[idx].cbndx = cbndx;
1221 arm_smmu_write_s2cr(smmu, idx);
Will Deacon43b412b2014-07-15 11:22:24 +01001222 }
Robin Murphy8e8b2032016-09-12 17:13:50 +01001223 return 0;
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001224}
1225
Will Deacon45ae7cf2013-06-24 18:31:25 +01001226static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1227{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001228 int ret;
Joerg Roedel9b468f72018-11-29 14:01:00 +01001229 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001230 struct arm_smmu_device *smmu;
Joerg Roedel1d672632015-03-26 13:43:10 +01001231 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001232
Robin Murphyadfec2e2016-09-12 17:13:55 +01001233 if (!fwspec || fwspec->ops != &arm_smmu_ops) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001234 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
1235 return -ENXIO;
1236 }
1237
Robin Murphyfba4f8e2016-10-17 12:06:21 +01001238 /*
1239 * FIXME: The arch/arm DMA API code tries to attach devices to its own
1240 * domains between of_xlate() and add_device() - we have no way to cope
1241 * with that, so until ARM gets converted to rely on groups and default
1242 * domains, just say no (but more politely than by dereferencing NULL).
1243 * This should be at least a WARN_ON once that's sorted.
1244 */
1245 if (!fwspec->iommu_priv)
1246 return -ENODEV;
1247
Robin Murphyadfec2e2016-09-12 17:13:55 +01001248 smmu = fwspec_smmu(fwspec);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301249
1250 ret = arm_smmu_rpm_get(smmu);
1251 if (ret < 0)
1252 return ret;
1253
Will Deacon518f7132014-11-14 17:17:54 +00001254 /* Ensure that the domain is finalised */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001255 ret = arm_smmu_init_domain_context(domain, smmu);
Arnd Bergmann287980e2016-05-27 23:23:25 +02001256 if (ret < 0)
Sricharan Rd4a44f02018-12-04 11:52:10 +05301257 goto rpm_put;
Will Deacon518f7132014-11-14 17:17:54 +00001258
Will Deacon45ae7cf2013-06-24 18:31:25 +01001259 /*
Will Deacon44680ee2014-06-25 11:29:12 +01001260 * Sanity check the domain. We don't support domains across
1261 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01001262 */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001263 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001264 dev_err(dev,
1265 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Robin Murphyadfec2e2016-09-12 17:13:55 +01001266 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
Sricharan Rd4a44f02018-12-04 11:52:10 +05301267 ret = -EINVAL;
1268 goto rpm_put;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001269 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001270
1271 /* Looks ok, so add the device to the domain */
Sricharan Rd4a44f02018-12-04 11:52:10 +05301272 ret = arm_smmu_domain_add_master(smmu_domain, fwspec);
1273
1274rpm_put:
1275 arm_smmu_rpm_put(smmu);
1276 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001277}
1278
Will Deacon45ae7cf2013-06-24 18:31:25 +01001279static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00001280 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001281{
Robin Murphy523d7422017-06-22 16:53:56 +01001282 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301283 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1284 int ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001285
Will Deacon518f7132014-11-14 17:17:54 +00001286 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001287 return -ENODEV;
1288
Sricharan Rd4a44f02018-12-04 11:52:10 +05301289 arm_smmu_rpm_get(smmu);
1290 ret = ops->map(ops, iova, paddr, size, prot);
1291 arm_smmu_rpm_put(smmu);
1292
1293 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001294}
1295
1296static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
1297 size_t size)
1298{
Robin Murphy523d7422017-06-22 16:53:56 +01001299 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301300 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1301 size_t ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001302
Will Deacon518f7132014-11-14 17:17:54 +00001303 if (!ops)
1304 return 0;
1305
Sricharan Rd4a44f02018-12-04 11:52:10 +05301306 arm_smmu_rpm_get(smmu);
1307 ret = ops->unmap(ops, iova, size);
1308 arm_smmu_rpm_put(smmu);
1309
1310 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001311}
1312
Robin Murphy44f68762018-09-20 17:10:27 +01001313static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
1314{
1315 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301316 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy44f68762018-09-20 17:10:27 +01001317
Sricharan Rd4a44f02018-12-04 11:52:10 +05301318 if (smmu_domain->tlb_ops) {
1319 arm_smmu_rpm_get(smmu);
Robin Murphy44f68762018-09-20 17:10:27 +01001320 smmu_domain->tlb_ops->tlb_flush_all(smmu_domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301321 arm_smmu_rpm_put(smmu);
1322 }
Robin Murphy44f68762018-09-20 17:10:27 +01001323}
1324
Robin Murphy32b12442017-09-28 15:55:01 +01001325static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
1326{
1327 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301328 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy32b12442017-09-28 15:55:01 +01001329
Sricharan Rd4a44f02018-12-04 11:52:10 +05301330 if (smmu_domain->tlb_ops) {
1331 arm_smmu_rpm_get(smmu);
Robin Murphy32b12442017-09-28 15:55:01 +01001332 smmu_domain->tlb_ops->tlb_sync(smmu_domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301333 arm_smmu_rpm_put(smmu);
1334 }
Robin Murphy32b12442017-09-28 15:55:01 +01001335}
1336
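/*
 * Resolve an IOVA by asking the hardware to perform the translation via the
 * context bank's ATS1PR register, polling ATSR for completion and falling
 * back to a software table walk if the operation times out.
 */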
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001337static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
1338 dma_addr_t iova)
1339{
Joerg Roedel1d672632015-03-26 13:43:10 +01001340 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001341 struct arm_smmu_device *smmu = smmu_domain->smmu;
1342 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1343 struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
1344 struct device *dev = smmu->dev;
1345 void __iomem *cb_base;
1346 u32 tmp;
1347 u64 phys;
Robin Murphy523d7422017-06-22 16:53:56 +01001348 unsigned long va, flags;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301349 int ret;
1350
1351 ret = arm_smmu_rpm_get(smmu);
1352 if (ret < 0)
1353 return 0;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001354
Robin Murphy452107c2017-03-30 17:56:30 +01001355 cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001356
Robin Murphy523d7422017-06-22 16:53:56 +01001357 spin_lock_irqsave(&smmu_domain->cb_lock, flags);
Robin Murphy661d9622015-05-27 17:09:34 +01001358 /* ATS1 registers can only be written atomically */
1359 va = iova & ~0xfffUL;
Robin Murphy661d9622015-05-27 17:09:34 +01001360 if (smmu->version == ARM_SMMU_V2)
Robin Murphyf9a05f02016-04-13 18:13:01 +01001361 smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
1362 else /* Register is only 32-bit in v1 */
Robin Murphy661d9622015-05-27 17:09:34 +01001363 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001364
1365 if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
1366 !(tmp & ATSR_ACTIVE), 5, 50)) {
Robin Murphy523d7422017-06-22 16:53:56 +01001367 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001368 dev_err(dev,
Fabio Estevam077124c2015-08-18 17:12:24 +01001369 "iova to phys timed out on %pad. Falling back to software table walk.\n",
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001370 &iova);
		arm_smmu_rpm_put(smmu);
		return ops->iova_to_phys(ops, iova);
1372 }
1373
Robin Murphyf9a05f02016-04-13 18:13:01 +01001374 phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
Robin Murphy523d7422017-06-22 16:53:56 +01001375 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001376 if (phys & CB_PAR_F) {
1377 dev_err(dev, "translation fault!\n");
1378 dev_err(dev, "PAR = 0x%llx\n", phys);
		arm_smmu_rpm_put(smmu);
		return 0;
1380 }
1381
Sricharan Rd4a44f02018-12-04 11:52:10 +05301382 arm_smmu_rpm_put(smmu);
1383
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001384 return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
1385}
1386
Will Deacon45ae7cf2013-06-24 18:31:25 +01001387static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001388 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001389{
Joerg Roedel1d672632015-03-26 13:43:10 +01001390 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Robin Murphy523d7422017-06-22 16:53:56 +01001391 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001392
Sunil Gouthambdf95922017-04-25 15:27:52 +05301393 if (domain->type == IOMMU_DOMAIN_IDENTITY)
1394 return iova;
1395
Will Deacon518f7132014-11-14 17:17:54 +00001396 if (!ops)
Will Deacona44a97912013-11-07 18:47:50 +00001397 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001398
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001399 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
Robin Murphy523d7422017-06-22 16:53:56 +01001400 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
1401 return arm_smmu_iova_to_phys_hard(domain, iova);
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001402
Robin Murphy523d7422017-06-22 16:53:56 +01001403 return ops->iova_to_phys(ops, iova);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001404}
1405
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001406static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001407{
Will Deacond0948942014-06-24 17:30:10 +01001408 switch (cap) {
1409 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001410 /*
1411 * Return true here as the SMMU can always send out coherent
1412 * requests.
1413 */
1414 return true;
Antonios Motakis0029a8d2014-10-13 14:06:18 +01001415 case IOMMU_CAP_NOEXEC:
1416 return true;
Will Deacond0948942014-06-24 17:30:10 +01001417 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001418 return false;
Will Deacond0948942014-06-24 17:30:10 +01001419 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001420}
Will Deacon45ae7cf2013-06-24 18:31:25 +01001421
Robin Murphy021bb842016-09-14 15:26:46 +01001422static int arm_smmu_match_node(struct device *dev, void *data)
1423{
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001424 return dev->fwnode == data;
Robin Murphy021bb842016-09-14 15:26:46 +01001425}
1426
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001427static
1428struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
Robin Murphy021bb842016-09-14 15:26:46 +01001429{
1430 struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001431 fwnode, arm_smmu_match_node);
Robin Murphy021bb842016-09-14 15:26:46 +01001432 put_device(dev);
1433 return dev ? dev_get_drvdata(dev) : NULL;
1434}
1435
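/*
 * Look up the SMMU instance for a new master (via the legacy "mmu-masters"
 * binding or the device's fwspec), validate its stream IDs and masks against
 * the hardware limits, and allocate the per-master configuration and SMEs.
 */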
Will Deacon03edb222015-01-19 14:27:33 +00001436static int arm_smmu_add_device(struct device *dev)
1437{
Robin Murphyadfec2e2016-09-12 17:13:55 +01001438 struct arm_smmu_device *smmu;
Robin Murphyf80cd882016-09-14 15:21:39 +01001439 struct arm_smmu_master_cfg *cfg;
Joerg Roedel9b468f72018-11-29 14:01:00 +01001440 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphyf80cd882016-09-14 15:21:39 +01001441 int i, ret;
1442
Robin Murphy021bb842016-09-14 15:26:46 +01001443 if (using_legacy_binding) {
1444 ret = arm_smmu_register_legacy_master(dev, &smmu);
Artem Savkova7990c62017-08-08 12:26:02 +02001445
1446 /*
 1447		 * If dev->iommu_fwspec is initially NULL, arm_smmu_register_legacy_master()
1448 * will allocate/initialise a new one. Thus we need to update fwspec for
1449 * later use.
1450 */
Joerg Roedel9b468f72018-11-29 14:01:00 +01001451 fwspec = dev_iommu_fwspec_get(dev);
Robin Murphy021bb842016-09-14 15:26:46 +01001452 if (ret)
1453 goto out_free;
Robin Murphy3c117b52016-11-02 17:31:32 +00001454 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001455 smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
Robin Murphy021bb842016-09-14 15:26:46 +01001456 } else {
1457 return -ENODEV;
1458 }
Robin Murphyf80cd882016-09-14 15:21:39 +01001459
1460 ret = -EINVAL;
Robin Murphyadfec2e2016-09-12 17:13:55 +01001461 for (i = 0; i < fwspec->num_ids; i++) {
1462 u16 sid = fwspec->ids[i];
Robin Murphy021bb842016-09-14 15:26:46 +01001463 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
Robin Murphyf80cd882016-09-14 15:21:39 +01001464
Robin Murphyadfec2e2016-09-12 17:13:55 +01001465 if (sid & ~smmu->streamid_mask) {
Robin Murphyf80cd882016-09-14 15:21:39 +01001466 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
Robin Murphy021bb842016-09-14 15:26:46 +01001467 sid, smmu->streamid_mask);
1468 goto out_free;
1469 }
1470 if (mask & ~smmu->smr_mask_mask) {
1471 dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
Peng Fan6323f472017-04-21 17:03:36 +08001472 mask, smmu->smr_mask_mask);
Robin Murphyf80cd882016-09-14 15:21:39 +01001473 goto out_free;
1474 }
Robin Murphyf80cd882016-09-14 15:21:39 +01001475 }
Will Deacon03edb222015-01-19 14:27:33 +00001476
Robin Murphyadfec2e2016-09-12 17:13:55 +01001477 ret = -ENOMEM;
1478 cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
1479 GFP_KERNEL);
1480 if (!cfg)
1481 goto out_free;
1482
1483 cfg->smmu = smmu;
1484 fwspec->iommu_priv = cfg;
1485 while (i--)
1486 cfg->smendx[i] = INVALID_SMENDX;
1487
Sricharan Rd4a44f02018-12-04 11:52:10 +05301488 ret = arm_smmu_rpm_get(smmu);
1489 if (ret < 0)
1490 goto out_cfg_free;
1491
Robin Murphy588888a2016-09-12 17:13:54 +01001492 ret = arm_smmu_master_alloc_smes(dev);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301493 arm_smmu_rpm_put(smmu);
1494
Robin Murphyadfec2e2016-09-12 17:13:55 +01001495 if (ret)
Vivek Gautamc54451a2017-07-06 15:07:00 +05301496 goto out_cfg_free;
Robin Murphyadfec2e2016-09-12 17:13:55 +01001497
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001498 iommu_device_link(&smmu->iommu, dev);
1499
Sricharan R655e3642018-12-04 11:52:11 +05301500 device_link_add(dev, smmu->dev,
1501 DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER);
1502
Robin Murphyadfec2e2016-09-12 17:13:55 +01001503 return 0;
Robin Murphyf80cd882016-09-14 15:21:39 +01001504
Vivek Gautamc54451a2017-07-06 15:07:00 +05301505out_cfg_free:
1506 kfree(cfg);
Robin Murphyf80cd882016-09-14 15:21:39 +01001507out_free:
Robin Murphyadfec2e2016-09-12 17:13:55 +01001508 iommu_fwspec_free(dev);
Robin Murphyf80cd882016-09-14 15:21:39 +01001509 return ret;
Will Deacon03edb222015-01-19 14:27:33 +00001510}
1511
Will Deacon45ae7cf2013-06-24 18:31:25 +01001512static void arm_smmu_remove_device(struct device *dev)
1513{
Joerg Roedel9b468f72018-11-29 14:01:00 +01001514 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001515 struct arm_smmu_master_cfg *cfg;
1516 struct arm_smmu_device *smmu;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301517 int ret;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001518
Robin Murphyadfec2e2016-09-12 17:13:55 +01001519 if (!fwspec || fwspec->ops != &arm_smmu_ops)
Robin Murphyf80cd882016-09-14 15:21:39 +01001520 return;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001521
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001522 cfg = fwspec->iommu_priv;
1523 smmu = cfg->smmu;
1524
Sricharan Rd4a44f02018-12-04 11:52:10 +05301525 ret = arm_smmu_rpm_get(smmu);
1526 if (ret < 0)
1527 return;
1528
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001529 iommu_device_unlink(&smmu->iommu, dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001530 arm_smmu_master_free_smes(fwspec);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301531
1532 arm_smmu_rpm_put(smmu);
1533
Antonios Motakis5fc63a72013-10-18 16:08:29 +01001534 iommu_group_remove_device(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001535 kfree(fwspec->iommu_priv);
1536 iommu_fwspec_free(dev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001537}
1538
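/*
 * Masters sharing a stream map entry must also share an iommu_group, so
 * reuse any group already associated with one of this device's SMEs and
 * fall back to the generic PCI/fsl-mc/platform group allocation otherwise.
 */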
Joerg Roedelaf659932015-10-21 23:51:41 +02001539static struct iommu_group *arm_smmu_device_group(struct device *dev)
1540{
Joerg Roedel9b468f72018-11-29 14:01:00 +01001541 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001542 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
Robin Murphy588888a2016-09-12 17:13:54 +01001543 struct iommu_group *group = NULL;
1544 int i, idx;
1545
Robin Murphyadfec2e2016-09-12 17:13:55 +01001546 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy588888a2016-09-12 17:13:54 +01001547 if (group && smmu->s2crs[idx].group &&
1548 group != smmu->s2crs[idx].group)
1549 return ERR_PTR(-EINVAL);
1550
1551 group = smmu->s2crs[idx].group;
1552 }
1553
1554 if (group)
Robin Murphye1b44cb2016-11-11 17:59:22 +00001555 return iommu_group_ref_get(group);
Joerg Roedelaf659932015-10-21 23:51:41 +02001556
1557 if (dev_is_pci(dev))
1558 group = pci_device_group(dev);
Nipun Guptaeab03e22018-09-10 19:19:18 +05301559 else if (dev_is_fsl_mc(dev))
1560 group = fsl_mc_device_group(dev);
Joerg Roedelaf659932015-10-21 23:51:41 +02001561 else
1562 group = generic_device_group(dev);
1563
Joerg Roedelaf659932015-10-21 23:51:41 +02001564 return group;
1565}
1566
Will Deaconc752ce42014-06-25 22:46:31 +01001567static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
1568 enum iommu_attr attr, void *data)
1569{
Joerg Roedel1d672632015-03-26 13:43:10 +01001570 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001571
Robin Murphy44f68762018-09-20 17:10:27 +01001572	switch (domain->type) {
1573 case IOMMU_DOMAIN_UNMANAGED:
1574 switch (attr) {
1575 case DOMAIN_ATTR_NESTING:
1576 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
1577 return 0;
1578 default:
1579 return -ENODEV;
1580 }
1581 break;
1582 case IOMMU_DOMAIN_DMA:
1583 switch (attr) {
1584 case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
1585 *(int *)data = smmu_domain->non_strict;
1586 return 0;
1587 default:
1588 return -ENODEV;
1589 }
1590 break;
Will Deaconc752ce42014-06-25 22:46:31 +01001591 default:
Robin Murphy44f68762018-09-20 17:10:27 +01001592 return -EINVAL;
Will Deaconc752ce42014-06-25 22:46:31 +01001593 }
1594}
1595
1596static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
1597 enum iommu_attr attr, void *data)
1598{
Will Deacon518f7132014-11-14 17:17:54 +00001599 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01001600 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001601
Will Deacon518f7132014-11-14 17:17:54 +00001602 mutex_lock(&smmu_domain->init_mutex);
1603
Robin Murphy44f68762018-09-20 17:10:27 +01001604	switch (domain->type) {
1605 case IOMMU_DOMAIN_UNMANAGED:
1606 switch (attr) {
1607 case DOMAIN_ATTR_NESTING:
1608 if (smmu_domain->smmu) {
1609 ret = -EPERM;
1610 goto out_unlock;
1611 }
1612
1613 if (*(int *)data)
1614 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1615 else
1616 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1617 break;
1618 default:
1619 ret = -ENODEV;
Will Deacon518f7132014-11-14 17:17:54 +00001620 }
Robin Murphy44f68762018-09-20 17:10:27 +01001621 break;
1622 case IOMMU_DOMAIN_DMA:
1623 switch (attr) {
1624 case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
1625 smmu_domain->non_strict = *(int *)data;
1626 break;
1627 default:
1628 ret = -ENODEV;
1629 }
Will Deacon518f7132014-11-14 17:17:54 +00001630 break;
Will Deaconc752ce42014-06-25 22:46:31 +01001631 default:
Robin Murphy44f68762018-09-20 17:10:27 +01001632 ret = -EINVAL;
Will Deaconc752ce42014-06-25 22:46:31 +01001633 }
Will Deacon518f7132014-11-14 17:17:54 +00001634out_unlock:
1635 mutex_unlock(&smmu_domain->init_mutex);
1636 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01001637}
1638
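/*
 * Pack a generic-binding specifier into a firmware ID: the first cell is the
 * stream ID and an optional second cell (or the SMMU node's
 * "stream-match-mask" property) supplies the SMR mask, e.g.
 * iommus = <&smmu 0x400 0x7f80>; (the example values are purely illustrative).
 */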
Robin Murphy021bb842016-09-14 15:26:46 +01001639static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
1640{
Robin Murphy56fbf602017-03-31 12:03:33 +01001641 u32 mask, fwid = 0;
Robin Murphy021bb842016-09-14 15:26:46 +01001642
1643 if (args->args_count > 0)
1644 fwid |= (u16)args->args[0];
1645
1646 if (args->args_count > 1)
1647 fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
Robin Murphy56fbf602017-03-31 12:03:33 +01001648 else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
1649 fwid |= (u16)mask << SMR_MASK_SHIFT;
Robin Murphy021bb842016-09-14 15:26:46 +01001650
1651 return iommu_fwspec_add_ids(dev, &fwid, 1);
1652}
1653
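/*
 * Report the regions a master cannot use for DMA: the software-managed MSI
 * window at MSI_IOVA_BASE plus anything the generic DMA layer reserves.
 */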
Eric Augerf3ebee82017-01-19 20:57:55 +00001654static void arm_smmu_get_resv_regions(struct device *dev,
1655 struct list_head *head)
1656{
1657 struct iommu_resv_region *region;
1658 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
1659
1660 region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
Robin Murphy9d3a4de2017-03-16 17:00:16 +00001661 prot, IOMMU_RESV_SW_MSI);
Eric Augerf3ebee82017-01-19 20:57:55 +00001662 if (!region)
1663 return;
1664
1665 list_add_tail(&region->list, head);
Robin Murphy273df962017-03-16 17:00:19 +00001666
1667 iommu_dma_get_resv_regions(dev, head);
Eric Augerf3ebee82017-01-19 20:57:55 +00001668}
1669
1670static void arm_smmu_put_resv_regions(struct device *dev,
1671 struct list_head *head)
1672{
1673 struct iommu_resv_region *entry, *next;
1674
1675 list_for_each_entry_safe(entry, next, head, list)
1676 kfree(entry);
1677}
1678
Will Deacon518f7132014-11-14 17:17:54 +00001679static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01001680 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01001681 .domain_alloc = arm_smmu_domain_alloc,
1682 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01001683 .attach_dev = arm_smmu_attach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01001684 .map = arm_smmu_map,
1685 .unmap = arm_smmu_unmap,
Robin Murphy44f68762018-09-20 17:10:27 +01001686 .flush_iotlb_all = arm_smmu_flush_iotlb_all,
Robin Murphy32b12442017-09-28 15:55:01 +01001687 .iotlb_sync = arm_smmu_iotlb_sync,
Will Deaconc752ce42014-06-25 22:46:31 +01001688 .iova_to_phys = arm_smmu_iova_to_phys,
1689 .add_device = arm_smmu_add_device,
1690 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02001691 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01001692 .domain_get_attr = arm_smmu_domain_get_attr,
1693 .domain_set_attr = arm_smmu_domain_set_attr,
Robin Murphy021bb842016-09-14 15:26:46 +01001694 .of_xlate = arm_smmu_of_xlate,
Eric Augerf3ebee82017-01-19 20:57:55 +00001695 .get_resv_regions = arm_smmu_get_resv_regions,
1696 .put_resv_regions = arm_smmu_put_resv_regions,
Will Deacon518f7132014-11-14 17:17:54 +00001697 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001698};
1699
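/*
 * Bring the SMMU to a known state: clear the global fault status, reset all
 * stream mapping and context bank registers, apply the MMU-500 errata
 * workarounds, invalidate the TLBs and finally enable the device via sCR0.
 */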
1700static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
1701{
1702 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001703 int i;
Peng Fan3ca37122016-05-03 21:50:30 +08001704 u32 reg, major;
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001705
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001706 /* clear global FSR */
1707 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
1708 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001709
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001710 /*
1711 * Reset stream mapping groups: Initial values mark all SMRn as
1712 * invalid and all S2CRn as bypass unless overridden.
1713 */
Robin Murphy8e8b2032016-09-12 17:13:50 +01001714 for (i = 0; i < smmu->num_mapping_groups; ++i)
1715 arm_smmu_write_sme(smmu, i);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001716
Nipun Gupta6eb18d42016-11-04 15:25:23 +05301717 if (smmu->model == ARM_MMU500) {
1718 /*
 1719		 * Before clearing ARM_MMU500_ACTLR_CPRE, we need to
 1720		 * clear the CACHE_LOCK bit of ACR first; note that
 1721		 * CACHE_LOCK is only present in MMU-500r2 onwards.
1722 */
1723 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
1724 major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
Peng Fan3ca37122016-05-03 21:50:30 +08001725 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
Nipun Gupta6eb18d42016-11-04 15:25:23 +05301726 if (major >= 2)
1727 reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
1728 /*
1729 * Allow unmatched Stream IDs to allocate bypass
1730 * TLB entries for reduced latency.
1731 */
Feng Kan74f55d32017-10-11 15:08:39 -07001732 reg |= ARM_MMU500_ACR_SMTNMB_TLBEN | ARM_MMU500_ACR_S2CRB_TLBEN;
Peng Fan3ca37122016-05-03 21:50:30 +08001733 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
1734 }
1735
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001736 /* Make sure all context banks are disabled and clear CB_FSR */
1737 for (i = 0; i < smmu->num_context_banks; ++i) {
Robin Murphy90df3732017-08-08 14:56:14 +01001738 void __iomem *cb_base = ARM_SMMU_CB(smmu, i);
1739
1740 arm_smmu_write_context_bank(smmu, i);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001741 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01001742 /*
1743 * Disable MMU-500's not-particularly-beneficial next-page
1744 * prefetcher for the sake of errata #841119 and #826419.
1745 */
1746 if (smmu->model == ARM_MMU500) {
1747 reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
1748 reg &= ~ARM_MMU500_ACTLR_CPRE;
1749 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
1750 }
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001751 }
Will Deacon1463fe42013-07-31 19:21:27 +01001752
Will Deacon45ae7cf2013-06-24 18:31:25 +01001753 /* Invalidate the TLB, just in case */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001754 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
1755 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
1756
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001757 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001758
Will Deacon45ae7cf2013-06-24 18:31:25 +01001759 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001760 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001761
1762 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001763 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001764
Robin Murphy25a1c962016-02-10 14:25:33 +00001765 /* Enable client access, handling unmatched streams as appropriate */
1766 reg &= ~sCR0_CLIENTPD;
1767 if (disable_bypass)
1768 reg |= sCR0_USFCFG;
1769 else
1770 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001771
1772 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001773 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001774
1775 /* Don't upgrade barriers */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001776 reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001777
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001778 if (smmu->features & ARM_SMMU_FEAT_VMID16)
1779 reg |= sCR0_VMID16EN;
1780
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001781 if (smmu->features & ARM_SMMU_FEAT_EXIDS)
1782 reg |= sCR0_EXIDENABLE;
1783
Will Deacon45ae7cf2013-06-24 18:31:25 +01001784 /* Push the button */
Robin Murphy11febfc2017-03-30 17:56:31 +01001785 arm_smmu_tlb_sync_global(smmu);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001786 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001787}
1788
1789static int arm_smmu_id_size_to_bits(int size)
1790{
1791 switch (size) {
1792 case 0:
1793 return 32;
1794 case 1:
1795 return 36;
1796 case 2:
1797 return 40;
1798 case 3:
1799 return 42;
1800 case 4:
1801 return 44;
1802 case 5:
1803 default:
1804 return 48;
1805 }
1806}
1807
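/*
 * Probe the ID registers to discover what this SMMU implementation supports:
 * translation stages, stream matching resources, context banks, address
 * sizes and page table formats.
 */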
1808static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1809{
1810 unsigned long size;
1811 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1812 u32 id;
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001813 bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001814 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001815
1816 dev_notice(smmu->dev, "probing hardware configuration...\n");
Robin Murphyb7862e32016-04-13 18:13:03 +01001817 dev_notice(smmu->dev, "SMMUv%d with:\n",
1818 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001819
1820 /* ID0 */
1821 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01001822
1823 /* Restrict available stages based on module parameter */
1824 if (force_stage == 1)
1825 id &= ~(ID0_S2TS | ID0_NTS);
1826 else if (force_stage == 2)
1827 id &= ~(ID0_S1TS | ID0_NTS);
1828
Will Deacon45ae7cf2013-06-24 18:31:25 +01001829 if (id & ID0_S1TS) {
1830 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
1831 dev_notice(smmu->dev, "\tstage 1 translation\n");
1832 }
1833
1834 if (id & ID0_S2TS) {
1835 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
1836 dev_notice(smmu->dev, "\tstage 2 translation\n");
1837 }
1838
1839 if (id & ID0_NTS) {
1840 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
1841 dev_notice(smmu->dev, "\tnested translation\n");
1842 }
1843
1844 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01001845 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001846 dev_err(smmu->dev, "\tno translation support!\n");
1847 return -ENODEV;
1848 }
1849
Robin Murphyb7862e32016-04-13 18:13:03 +01001850 if ((id & ID0_S1TS) &&
1851 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001852 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
1853 dev_notice(smmu->dev, "\taddress translation ops\n");
1854 }
1855
Robin Murphybae2c2d2015-07-29 19:46:05 +01001856 /*
1857 * In order for DMA API calls to work properly, we must defer to what
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001858 * the FW says about coherency, regardless of what the hardware claims.
Robin Murphybae2c2d2015-07-29 19:46:05 +01001859 * Fortunately, this also opens up a workaround for systems where the
1860 * ID register value has ended up configured incorrectly.
1861 */
Robin Murphybae2c2d2015-07-29 19:46:05 +01001862 cttw_reg = !!(id & ID0_CTTW);
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001863 if (cttw_fw || cttw_reg)
Robin Murphybae2c2d2015-07-29 19:46:05 +01001864 dev_notice(smmu->dev, "\t%scoherent table walk\n",
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001865 cttw_fw ? "" : "non-");
1866 if (cttw_fw != cttw_reg)
Robin Murphybae2c2d2015-07-29 19:46:05 +01001867 dev_notice(smmu->dev,
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001868 "\t(IDR0.CTTW overridden by FW configuration)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01001869
Robin Murphy21174242016-09-12 17:13:48 +01001870 /* Max. number of entries we have for stream matching/indexing */
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001871 if (smmu->version == ARM_SMMU_V2 && id & ID0_EXIDS) {
1872 smmu->features |= ARM_SMMU_FEAT_EXIDS;
1873 size = 1 << 16;
1874 } else {
1875 size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
1876 }
Robin Murphy21174242016-09-12 17:13:48 +01001877 smmu->streamid_mask = size - 1;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001878 if (id & ID0_SMS) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001879 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
Robin Murphy21174242016-09-12 17:13:48 +01001880 size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
1881 if (size == 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001882 dev_err(smmu->dev,
1883 "stream-matching supported, but no SMRs present!\n");
1884 return -ENODEV;
1885 }
1886
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001887 /* Zero-initialised to mark as invalid */
1888 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
1889 GFP_KERNEL);
1890 if (!smmu->smrs)
1891 return -ENOMEM;
1892
Will Deacon45ae7cf2013-06-24 18:31:25 +01001893 dev_notice(smmu->dev,
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001894			   "\tstream matching with %lu register groups\n", size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001895 }
Robin Murphy8e8b2032016-09-12 17:13:50 +01001896 /* s2cr->type == 0 means translation, so initialise explicitly */
1897 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
1898 GFP_KERNEL);
1899 if (!smmu->s2crs)
1900 return -ENOMEM;
1901 for (i = 0; i < size; i++)
1902 smmu->s2crs[i] = s2cr_init_val;
1903
Robin Murphy21174242016-09-12 17:13:48 +01001904 smmu->num_mapping_groups = size;
Robin Murphy588888a2016-09-12 17:13:54 +01001905 mutex_init(&smmu->stream_map_mutex);
Will Deacon8e517e72017-07-06 15:55:48 +01001906 spin_lock_init(&smmu->global_sync_lock);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001907
Robin Murphy7602b872016-04-28 17:12:09 +01001908 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
1909 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
1910 if (!(id & ID0_PTFS_NO_AARCH32S))
1911 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
1912 }
1913
Will Deacon45ae7cf2013-06-24 18:31:25 +01001914 /* ID1 */
1915 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01001916 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001917
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01001918 /* Check for size mismatch of SMMU address space from mapped region */
Will Deacon518f7132014-11-14 17:17:54 +00001919 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
Robin Murphy452107c2017-03-30 17:56:30 +01001920 size <<= smmu->pgshift;
1921 if (smmu->cb_base != gr0_base + size)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001922 dev_warn(smmu->dev,
Robin Murphy452107c2017-03-30 17:56:30 +01001923 "SMMU address space size (0x%lx) differs from mapped region size (0x%tx)!\n",
1924 size * 2, (smmu->cb_base - gr0_base) * 2);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001925
Will Deacon518f7132014-11-14 17:17:54 +00001926 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001927 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
1928 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
1929 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
1930 return -ENODEV;
1931 }
1932 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
1933 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphye086d912016-04-13 18:12:58 +01001934 /*
1935 * Cavium CN88xx erratum #27704.
1936 * Ensure ASID and VMID allocation is unique across all SMMUs in
1937 * the system.
1938 */
1939 if (smmu->model == CAVIUM_SMMUV2) {
1940 smmu->cavium_id_base =
1941 atomic_add_return(smmu->num_context_banks,
1942 &cavium_smmu_context_count);
1943 smmu->cavium_id_base -= smmu->num_context_banks;
Robert Richter53c35dce2017-03-13 11:39:01 +01001944 dev_notice(smmu->dev, "\tenabling workaround for Cavium erratum 27704\n");
Robin Murphye086d912016-04-13 18:12:58 +01001945 }
Robin Murphy90df3732017-08-08 14:56:14 +01001946 smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks,
1947 sizeof(*smmu->cbs), GFP_KERNEL);
1948 if (!smmu->cbs)
1949 return -ENOMEM;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001950
1951 /* ID2 */
1952 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
1953 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00001954 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001955
Will Deacon518f7132014-11-14 17:17:54 +00001956 /* The output mask is also applied for bypass */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001957 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00001958 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001959
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001960 if (id & ID2_VMID16)
1961 smmu->features |= ARM_SMMU_FEAT_VMID16;
1962
Robin Murphyf1d84542015-03-04 16:41:05 +00001963 /*
1964 * What the page table walker can address actually depends on which
1965 * descriptor format is in use, but since a) we don't know that yet,
1966 * and b) it can vary per context bank, this will have to do...
1967 */
1968 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
1969 dev_warn(smmu->dev,
1970 "failed to set DMA mask for table walker\n");
1971
Robin Murphyb7862e32016-04-13 18:13:03 +01001972 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00001973 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01001974 if (smmu->version == ARM_SMMU_V1_64K)
1975 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001976 } else {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001977 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
Will Deacon518f7132014-11-14 17:17:54 +00001978 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00001979 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01001980 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00001981 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01001982 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00001983 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01001984 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001985 }
1986
Robin Murphy7602b872016-04-28 17:12:09 +01001987 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01001988 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01001989 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01001990 if (smmu->features &
1991 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01001992 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01001993 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01001994 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01001995 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01001996 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01001997
Robin Murphyd5466352016-05-09 17:20:09 +01001998 if (arm_smmu_ops.pgsize_bitmap == -1UL)
1999 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
2000 else
2001 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
2002 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
2003 smmu->pgsize_bitmap);
2004
Will Deacon518f7132014-11-14 17:17:54 +00002005
Will Deacon28d60072014-09-01 16:24:48 +01002006 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
2007 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
Will Deacon518f7132014-11-14 17:17:54 +00002008 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01002009
2010 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
2011 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
Will Deacon518f7132014-11-14 17:17:54 +00002012 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01002013
Will Deacon45ae7cf2013-06-24 18:31:25 +01002014 return 0;
2015}
2016
Robin Murphy67b65a32016-04-13 18:12:57 +01002017struct arm_smmu_match_data {
2018 enum arm_smmu_arch_version version;
2019 enum arm_smmu_implementation model;
2020};
2021
2022#define ARM_SMMU_MATCH_DATA(name, ver, imp) \
Sricharan R96a299d2018-12-04 11:52:09 +05302023static const struct arm_smmu_match_data name = { .version = ver, .model = imp }
Robin Murphy67b65a32016-04-13 18:12:57 +01002024
2025ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
2026ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
Robin Murphyb7862e32016-04-13 18:13:03 +01002027ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01002028ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
Robin Murphye086d912016-04-13 18:12:58 +01002029ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
Vivek Gautam89cddc52018-12-04 11:52:13 +05302030ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2);
Robin Murphy67b65a32016-04-13 18:12:57 +01002031
Joerg Roedel09b52692014-10-02 12:24:45 +02002032static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01002033 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
2034 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
2035 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01002036 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01002037 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01002038 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Vivek Gautam89cddc52018-12-04 11:52:13 +05302039 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
Robin Murphy09360402014-08-28 17:51:59 +01002040 { },
2041};
Robin Murphy09360402014-08-28 17:51:59 +01002042
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002043#ifdef CONFIG_ACPI
2044static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
2045{
2046 int ret = 0;
2047
2048 switch (model) {
2049 case ACPI_IORT_SMMU_V1:
2050 case ACPI_IORT_SMMU_CORELINK_MMU400:
2051 smmu->version = ARM_SMMU_V1;
2052 smmu->model = GENERIC_SMMU;
2053 break;
Robin Murphy84c24372017-06-19 16:41:56 +01002054 case ACPI_IORT_SMMU_CORELINK_MMU401:
2055 smmu->version = ARM_SMMU_V1_64K;
2056 smmu->model = GENERIC_SMMU;
2057 break;
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002058 case ACPI_IORT_SMMU_V2:
2059 smmu->version = ARM_SMMU_V2;
2060 smmu->model = GENERIC_SMMU;
2061 break;
2062 case ACPI_IORT_SMMU_CORELINK_MMU500:
2063 smmu->version = ARM_SMMU_V2;
2064 smmu->model = ARM_MMU500;
2065 break;
Robin Murphy84c24372017-06-19 16:41:56 +01002066 case ACPI_IORT_SMMU_CAVIUM_THUNDERX:
2067 smmu->version = ARM_SMMU_V2;
2068 smmu->model = CAVIUM_SMMUV2;
2069 break;
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002070 default:
2071 ret = -ENODEV;
2072 }
2073
2074 return ret;
2075}
2076
2077static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
2078 struct arm_smmu_device *smmu)
2079{
2080 struct device *dev = smmu->dev;
2081 struct acpi_iort_node *node =
2082 *(struct acpi_iort_node **)dev_get_platdata(dev);
2083 struct acpi_iort_smmu *iort_smmu;
2084 int ret;
2085
2086 /* Retrieve SMMU1/2 specific data */
2087 iort_smmu = (struct acpi_iort_smmu *)node->node_data;
2088
2089 ret = acpi_smmu_get_data(iort_smmu->model, smmu);
2090 if (ret < 0)
2091 return ret;
2092
2093 /* Ignore the configuration access interrupt */
2094 smmu->num_global_irqs = 1;
2095
2096 if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
2097 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
2098
2099 return 0;
2100}
2101#else
2102static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
2103 struct arm_smmu_device *smmu)
2104{
2105 return -ENODEV;
2106}
2107#endif
2108
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002109static int arm_smmu_device_dt_probe(struct platform_device *pdev,
2110 struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002111{
Robin Murphy67b65a32016-04-13 18:12:57 +01002112 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002113 struct device *dev = &pdev->dev;
Robin Murphy021bb842016-09-14 15:26:46 +01002114 bool legacy_binding;
2115
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002116 if (of_property_read_u32(dev->of_node, "#global-interrupts",
2117 &smmu->num_global_irqs)) {
2118 dev_err(dev, "missing #global-interrupts property\n");
2119 return -ENODEV;
2120 }
2121
2122 data = of_device_get_match_data(dev);
2123 smmu->version = data->version;
2124 smmu->model = data->model;
2125
2126 parse_driver_options(smmu);
2127
Robin Murphy021bb842016-09-14 15:26:46 +01002128 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
2129 if (legacy_binding && !using_generic_binding) {
2130 if (!using_legacy_binding)
2131 pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
2132 using_legacy_binding = true;
2133 } else if (!legacy_binding && !using_legacy_binding) {
2134 using_generic_binding = true;
2135 } else {
2136 dev_err(dev, "not probing due to mismatched DT properties\n");
2137 return -ENODEV;
2138 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002139
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002140 if (of_dma_is_coherent(dev->of_node))
2141 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
2142
2143 return 0;
2144}
2145
Robin Murphyf6810c12017-04-10 16:51:05 +05302146static void arm_smmu_bus_init(void)
2147{
2148 /* Oh, for a proper bus abstraction */
2149 if (!iommu_present(&platform_bus_type))
2150 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
2151#ifdef CONFIG_ARM_AMBA
2152 if (!iommu_present(&amba_bustype))
2153 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
2154#endif
2155#ifdef CONFIG_PCI
2156 if (!iommu_present(&pci_bus_type)) {
2157 pci_request_acs();
2158 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
2159 }
2160#endif
Nipun Guptaeab03e22018-09-10 19:19:18 +05302161#ifdef CONFIG_FSL_MC_BUS
2162 if (!iommu_present(&fsl_mc_bus_type))
2163 bus_set_iommu(&fsl_mc_bus_type, &arm_smmu_ops);
2164#endif
Robin Murphyf6810c12017-04-10 16:51:05 +05302165}
2166
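/*
 * Platform driver probe: parse the DT or ACPI (IORT) description, map the
 * registers, collect clocks and IRQs, probe the hardware configuration,
 * register with the IOMMU core and reset the device ready for use.
 */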
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002167static int arm_smmu_device_probe(struct platform_device *pdev)
2168{
2169 struct resource *res;
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002170 resource_size_t ioaddr;
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002171 struct arm_smmu_device *smmu;
2172 struct device *dev = &pdev->dev;
2173 int num_irqs, i, err;
2174
Will Deacon45ae7cf2013-06-24 18:31:25 +01002175 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
2176 if (!smmu) {
2177 dev_err(dev, "failed to allocate arm_smmu_device\n");
2178 return -ENOMEM;
2179 }
2180 smmu->dev = dev;
2181
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002182 if (dev->of_node)
2183 err = arm_smmu_device_dt_probe(pdev, smmu);
2184 else
2185 err = arm_smmu_device_acpi_probe(pdev, smmu);
2186
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002187 if (err)
2188 return err;
Robin Murphy09360402014-08-28 17:51:59 +01002189
Will Deacon45ae7cf2013-06-24 18:31:25 +01002190 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002191 ioaddr = res->start;
Julia Lawall8a7f4312013-08-19 12:20:37 +01002192 smmu->base = devm_ioremap_resource(dev, res);
2193 if (IS_ERR(smmu->base))
2194 return PTR_ERR(smmu->base);
Robin Murphy452107c2017-03-30 17:56:30 +01002195 smmu->cb_base = smmu->base + resource_size(res) / 2;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002196
Will Deacon45ae7cf2013-06-24 18:31:25 +01002197 num_irqs = 0;
2198 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
2199 num_irqs++;
2200 if (num_irqs > smmu->num_global_irqs)
2201 smmu->num_context_irqs++;
2202 }
2203
Andreas Herrmann44a08de2013-10-01 13:39:07 +01002204 if (!smmu->num_context_irqs) {
2205 dev_err(dev, "found %d interrupts but expected at least %d\n",
2206 num_irqs, smmu->num_global_irqs + 1);
2207 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002208 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002209
Kees Cooka86854d2018-06-12 14:07:58 -07002210 smmu->irqs = devm_kcalloc(dev, num_irqs, sizeof(*smmu->irqs),
Will Deacon45ae7cf2013-06-24 18:31:25 +01002211 GFP_KERNEL);
2212 if (!smmu->irqs) {
2213 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
2214 return -ENOMEM;
2215 }
2216
2217 for (i = 0; i < num_irqs; ++i) {
2218 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07002219
Will Deacon45ae7cf2013-06-24 18:31:25 +01002220 if (irq < 0) {
2221 dev_err(dev, "failed to get irq index %d\n", i);
2222 return -ENODEV;
2223 }
2224 smmu->irqs[i] = irq;
2225 }
2226
Sricharan R96a299d2018-12-04 11:52:09 +05302227 err = devm_clk_bulk_get_all(dev, &smmu->clks);
2228 if (err < 0) {
2229 dev_err(dev, "failed to get clocks %d\n", err);
2230 return err;
2231 }
2232 smmu->num_clks = err;
2233
2234 err = clk_bulk_prepare_enable(smmu->num_clks, smmu->clks);
2235 if (err)
2236 return err;
2237
Olav Haugan3c8766d2014-08-22 17:12:32 -07002238 err = arm_smmu_device_cfg_probe(smmu);
2239 if (err)
2240 return err;
2241
Vivek Gautamd1e20222018-07-19 23:23:56 +05302242 if (smmu->version == ARM_SMMU_V2) {
2243 if (smmu->num_context_banks > smmu->num_context_irqs) {
2244 dev_err(dev,
2245 "found only %d context irq(s) but %d required\n",
2246 smmu->num_context_irqs, smmu->num_context_banks);
2247 return -ENODEV;
2248 }
2249
2250 /* Ignore superfluous interrupts */
2251 smmu->num_context_irqs = smmu->num_context_banks;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002252 }
2253
Will Deacon45ae7cf2013-06-24 18:31:25 +01002254 for (i = 0; i < smmu->num_global_irqs; ++i) {
Peng Fanbee14002016-07-04 17:38:22 +08002255 err = devm_request_irq(smmu->dev, smmu->irqs[i],
2256 arm_smmu_global_fault,
2257 IRQF_SHARED,
2258 "arm-smmu global fault",
2259 smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002260 if (err) {
2261 dev_err(dev, "failed to request global IRQ %d (%u)\n",
2262 i, smmu->irqs[i]);
Robin Murphyf80cd882016-09-14 15:21:39 +01002263 return err;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002264 }
2265 }
2266
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002267 err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
2268 "smmu.%pa", &ioaddr);
2269 if (err) {
2270 dev_err(dev, "Failed to register iommu in sysfs\n");
2271 return err;
2272 }
2273
2274 iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
2275 iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
2276
2277 err = iommu_device_register(&smmu->iommu);
2278 if (err) {
2279 dev_err(dev, "Failed to register iommu\n");
2280 return err;
2281 }
2282
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002283 platform_set_drvdata(pdev, smmu);
Will Deaconfd90cec2013-08-21 13:56:34 +01002284 arm_smmu_device_reset(smmu);
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03002285 arm_smmu_test_smr_masks(smmu);
Robin Murphy021bb842016-09-14 15:26:46 +01002286
Robin Murphyf6810c12017-04-10 16:51:05 +05302287 /*
Sricharan Rd4a44f02018-12-04 11:52:10 +05302288 * We want to avoid touching dev->power.lock in fastpaths unless
2289 * it's really going to do something useful - pm_runtime_enabled()
2290 * can serve as an ideal proxy for that decision. So, conditionally
2291 * enable pm_runtime.
2292 */
2293 if (dev->pm_domain) {
2294 pm_runtime_set_active(dev);
2295 pm_runtime_enable(dev);
2296 }
2297
2298 /*
Robin Murphyf6810c12017-04-10 16:51:05 +05302299 * For ACPI and generic DT bindings, an SMMU will be probed before
2300 * any device which might need it, so we want the bus ops in place
2301 * ready to handle default domain setup as soon as any SMMU exists.
2302 */
2303 if (!using_legacy_binding)
2304 arm_smmu_bus_init();
2305
Will Deacon45ae7cf2013-06-24 18:31:25 +01002306 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002307}
2308
Robin Murphyf6810c12017-04-10 16:51:05 +05302309/*
2310 * With the legacy DT binding in play, though, we have no guarantees about
2311 * probe order, but then we're also not doing default domains, so we can
2312 * delay setting bus ops until we're sure every possible SMMU is ready,
2313 * and that way ensure that no add_device() calls get missed.
2314 */
2315static int arm_smmu_legacy_bus_init(void)
2316{
2317 if (using_legacy_binding)
2318 arm_smmu_bus_init();
2319 return 0;
2320}
2321device_initcall_sync(arm_smmu_legacy_bus_init);
2322
Paul Gortmakeraddb672f2018-12-01 14:19:16 -05002323static void arm_smmu_device_shutdown(struct platform_device *pdev)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002324{
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002325 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002326
2327 if (!smmu)
Paul Gortmakeraddb672f2018-12-01 14:19:16 -05002328 return;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002329
Will Deaconecfadb62013-07-31 19:21:28 +01002330 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002331 dev_err(&pdev->dev, "removing device with active domains!\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01002332
Sricharan Rd4a44f02018-12-04 11:52:10 +05302333 arm_smmu_rpm_get(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002334 /* Turn the thing off */
Mitchel Humpherys29073202014-07-08 09:52:18 -07002335 writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Sricharan Rd4a44f02018-12-04 11:52:10 +05302336 arm_smmu_rpm_put(smmu);
Sricharan R96a299d2018-12-04 11:52:09 +05302337
Sricharan Rd4a44f02018-12-04 11:52:10 +05302338 if (pm_runtime_enabled(smmu->dev))
2339 pm_runtime_force_suspend(smmu->dev);
2340 else
2341 clk_bulk_disable(smmu->num_clks, smmu->clks);
2342
2343 clk_bulk_unprepare(smmu->num_clks, smmu->clks);
Nate Watterson7aa86192017-06-29 18:18:15 -04002344}
2345
Sricharan R96a299d2018-12-04 11:52:09 +05302346static int __maybe_unused arm_smmu_runtime_resume(struct device *dev)
Robin Murphya2d866f2017-08-08 14:56:15 +01002347{
2348 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
Sricharan R96a299d2018-12-04 11:52:09 +05302349 int ret;
2350
2351 ret = clk_bulk_enable(smmu->num_clks, smmu->clks);
2352 if (ret)
2353 return ret;
Robin Murphya2d866f2017-08-08 14:56:15 +01002354
2355 arm_smmu_device_reset(smmu);
Sricharan R96a299d2018-12-04 11:52:09 +05302356
Will Deacon45ae7cf2013-06-24 18:31:25 +01002357 return 0;
2358}
2359
Sricharan R96a299d2018-12-04 11:52:09 +05302360static int __maybe_unused arm_smmu_runtime_suspend(struct device *dev)
Dan Carpenter6614ee72013-08-21 09:34:20 +01002361{
Sricharan R96a299d2018-12-04 11:52:09 +05302362 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
2363
2364 clk_bulk_disable(smmu->num_clks, smmu->clks);
2365
2366 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002367}
2368
Robin Murphya2d866f2017-08-08 14:56:15 +01002369static int __maybe_unused arm_smmu_pm_resume(struct device *dev)
2370{
Sricharan R96a299d2018-12-04 11:52:09 +05302371 if (pm_runtime_suspended(dev))
2372 return 0;
Robin Murphya2d866f2017-08-08 14:56:15 +01002373
Sricharan R96a299d2018-12-04 11:52:09 +05302374 return arm_smmu_runtime_resume(dev);
Robin Murphya2d866f2017-08-08 14:56:15 +01002375}
2376
Sricharan R96a299d2018-12-04 11:52:09 +05302377static int __maybe_unused arm_smmu_pm_suspend(struct device *dev)
2378{
2379 if (pm_runtime_suspended(dev))
2380 return 0;
2381
2382 return arm_smmu_runtime_suspend(dev);
2383}
2384
2385static const struct dev_pm_ops arm_smmu_pm_ops = {
2386 SET_SYSTEM_SLEEP_PM_OPS(arm_smmu_pm_suspend, arm_smmu_pm_resume)
2387 SET_RUNTIME_PM_OPS(arm_smmu_runtime_suspend,
2388 arm_smmu_runtime_resume, NULL)
2389};
Robin Murphya2d866f2017-08-08 14:56:15 +01002390
Will Deacon45ae7cf2013-06-24 18:31:25 +01002391static struct platform_driver arm_smmu_driver = {
2392 .driver = {
Paul Gortmakeraddb672f2018-12-01 14:19:16 -05002393 .name = "arm-smmu",
2394 .of_match_table = of_match_ptr(arm_smmu_of_match),
2395 .pm = &arm_smmu_pm_ops,
2396 .suppress_bind_attrs = true,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002397 },
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002398 .probe = arm_smmu_device_probe,
Nate Watterson7aa86192017-06-29 18:18:15 -04002399 .shutdown = arm_smmu_device_shutdown,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002400};
Paul Gortmakeraddb672f2018-12-01 14:19:16 -05002401builtin_platform_driver(arm_smmu_driver);