/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 *	- Extended Stream ID (16 bit)
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>
#include <linux/fsl/mc.h>

#include "arm-smmu-regs.h"

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)
#define ARM_MMU500_ACR_S2CRB_TLBEN	(1 << 10)
#define ARM_MMU500_ACR_SMTNMB_TLBEN	(1 << 8)

#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */
#define TLB_SPIN_COUNT			10

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif

/* Translation context bank */
#define ARM_SMMU_CB(smmu, n)	((smmu)->cb_base + ((n) << (smmu)->pgshift))

#define MSI_IOVA_BASE			0x8000000
#define MSI_IOVA_LENGTH			0x100000

static int force_stage;
/*
 * not really modular, but the easiest way to keep compat with existing
 * bootargs behaviour is to continue using module_param() here.
 */
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass =
	IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT);
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
	QCOM_SMMUV2,
};

struct arm_smmu_s2cr {
	struct iommu_group		*group;
	int				count;
	enum arm_smmu_s2cr_type		type;
	enum arm_smmu_s2cr_privcfg	privcfg;
	u8				cbndx;
};

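/*
 * An S2CR is reset to fault or bypass incoming transactions depending on
 * the disable_bypass module parameter.
 */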
#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
}

struct arm_smmu_smr {
	u16				mask;
	u16				id;
	bool				valid;
};

struct arm_smmu_cb {
	u64				ttbr[2];
	u32				tcr[2];
	u32				mair[2];
	struct arm_smmu_cfg		*cfg;
};

struct arm_smmu_master_cfg {
	struct arm_smmu_device		*smmu;
	s16				smendx[];
};
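/*
 * Per-master state lives in the iommu_fwspec; these helpers walk the
 * stream-map entry indices recorded for each of a master's stream IDs.
 */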
#define INVALID_SMENDX			-1
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
#define fwspec_smendx(fw, i) \
	(i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
#define for_each_cfg_sme(fw, i, idx) \
	for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	void __iomem			*cb_base;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
#define ARM_SMMU_FEAT_EXIDS		(1 << 12)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	struct arm_smmu_cb		*cbs;
	atomic_t			irptndx;

	u32				num_mapping_groups;
	u16				streamid_mask;
	u16				smr_mask_mask;
	struct arm_smmu_smr		*smrs;
	struct arm_smmu_s2cr		*s2crs;
	struct mutex			stream_map_mutex;

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;
	struct clk_bulk_data		*clks;
	int				num_clks;

	u32				cavium_id_base; /* Specific to Cavium */

	spinlock_t			global_sync_lock;

	/* IOMMU core code handle */
	struct iommu_device		iommu;
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	union {
		u16			asid;
		u16			vmid;
	};
	u32				cbar;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
	ARM_SMMU_DOMAIN_BYPASS,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	const struct iommu_gather_ops	*tlb_ops;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	bool				non_strict;
	struct mutex			init_mutex; /* Protects smmu pointer */
	spinlock_t			cb_lock; /* Serialises ATS1* ops and TLB syncs */
	struct iommu_domain		domain;
};

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static bool using_legacy_binding, using_generic_binding;

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};

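/*
 * Runtime PM helpers: keep the SMMU powered across register accesses when
 * runtime PM is enabled for the device, and do nothing otherwise.
 */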
static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
{
	if (pm_runtime_enabled(smmu->dev))
		return pm_runtime_get_sync(smmu->dev);

	return 0;
}

static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
{
	if (pm_runtime_enabled(smmu->dev))
		pm_runtime_put(smmu->dev);
}

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}

static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", 0)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}

static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;

static int arm_smmu_register_legacy_master(struct device *dev,
					    struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err;

	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}

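/* Atomically claim/release an index in a context-bank allocation bitmap */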
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
				void __iomem *sync, void __iomem *status)
{
	unsigned int spin_cnt, delay;

	writel_relaxed(0, sync);
	for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
		for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
			if (!(readl_relaxed(status) & sTLBGSTATUS_GSACTIVE))
				return;
			cpu_relax();
		}
		udelay(delay);
	}
	dev_err_ratelimited(smmu->dev,
			    "TLB sync timed out -- SMMU may be deadlocked\n");
}

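/*
 * Global TLB syncs are serialised by global_sync_lock; per-context syncs
 * are serialised by the owning domain's cb_lock.
 */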
static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
{
	void __iomem *base = ARM_SMMU_GR0(smmu);
	unsigned long flags;

	spin_lock_irqsave(&smmu->global_sync_lock, flags);
	__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_GR0_sTLBGSYNC,
			    base + ARM_SMMU_GR0_sTLBGSTATUS);
	spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
}

static void arm_smmu_tlb_sync_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
	unsigned long flags;

	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
	__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_CB_TLBSYNC,
			    base + ARM_SMMU_CB_TLBSTATUS);
	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
}

static void arm_smmu_tlb_sync_vmid(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;

	arm_smmu_tlb_sync_global(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context_s1(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *base = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);

	/*
	 * NOTE: this is not a relaxed write; it needs to guarantee that PTEs
	 * cleared by the current CPU are visible to the SMMU before the TLBI.
	 */
	writel(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
	arm_smmu_tlb_sync_context(cookie);
}

static void arm_smmu_tlb_inv_context_s2(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *base = ARM_SMMU_GR0(smmu);

	/* NOTE: see above */
	writel(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
	arm_smmu_tlb_sync_global(smmu);
}

static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);

	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	if (stage1) {
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			iova &= ~12UL;
			iova |= cfg->asid;
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)cfg->asid << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else {
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	}
}

/*
 * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
 * almost negligible, but the benefit of getting the first one in as far ahead
 * of the sync as possible is significant, hence we don't just make this a
 * no-op and set .tlb_sync to arm_smmu_inv_context_s2() as you might think.
 */
static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
					 size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	void __iomem *base = ARM_SMMU_GR0(smmu_domain->smmu);

	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
}

static const struct iommu_gather_ops arm_smmu_s1_tlb_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s1,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync_context,
};

static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v2 = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync_context,
};

static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v1 = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
	.tlb_add_flush	= arm_smmu_tlb_inv_vmid_nosync,
	.tlb_sync	= arm_smmu_tlb_sync_vmid,
};

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	u32 fsr, fsynr, cbfrsynra;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr1_base = ARM_SMMU_GR1(smmu);
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
	cbfrsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));

	dev_err_ratelimited(smmu->dev,
	"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
			    fsr, iova, fsynr, cbfrsynra, cfg->cbndx);

	writel(fsr, cb_base + ARM_SMMU_CB_FSR);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

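/*
 * Stage the TTBCR/TTBR/MAIR values for a context bank in its shadow
 * arm_smmu_cb; arm_smmu_write_context_bank() pushes them to the hardware.
 */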
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	cb->cfg = cfg;

	/* TTBCR */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr;
		} else {
			cb->tcr[0] = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
			cb->tcr[1] = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			cb->tcr[1] |= TTBCR2_SEP_UPSTREAM;
			if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
				cb->tcr[1] |= TTBCR2_AS;
		}
	} else {
		cb->tcr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	}

	/* TTBRs */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
			cb->ttbr[1] = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
		} else {
			cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
			cb->ttbr[0] |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
			cb->ttbr[1] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
			cb->ttbr[1] |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
		}
	} else {
		cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr;
			cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr;
		} else {
			cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
			cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		}
	}
}

static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
{
	u32 reg;
	bool stage1;
	struct arm_smmu_cb *cb = &smmu->cbs[idx];
	struct arm_smmu_cfg *cfg = cb->cfg;
	void __iomem *cb_base, *gr1_base;

	cb_base = ARM_SMMU_CB(smmu, idx);

	/* Unassigned context banks only need disabling */
	if (!cfg) {
		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
		return;
	}

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	/* CBA2R */
	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_RW64_64BIT;
		else
			reg = CBA2R_RW64_32BIT;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= cfg->vmid << CBA2R_VMID_SHIFT;

		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(idx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version < ARM_SMMU_V2)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= cfg->vmid << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(idx));

	/*
	 * TTBCR
	 * We must write this before the TTBRs, since it determines the
	 * access behaviour of some fields (in particular, ASID[15:8]).
	 */
	if (stage1 && smmu->version > ARM_SMMU_V1)
		writel_relaxed(cb->tcr[1], cb_base + ARM_SMMU_CB_TTBCR2);
	writel_relaxed(cb->tcr[0], cb_base + ARM_SMMU_CB_TTBCR);

	/* TTBRs */
	if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
		writel_relaxed(cfg->asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
		writel_relaxed(cb->ttbr[0], cb_base + ARM_SMMU_CB_TTBR0);
		writel_relaxed(cb->ttbr[1], cb_base + ARM_SMMU_CB_TTBR1);
	} else {
		writeq_relaxed(cb->ttbr[0], cb_base + ARM_SMMU_CB_TTBR0);
		if (stage1)
			writeq_relaxed(cb->ttbr[1], cb_base + ARM_SMMU_CB_TTBR1);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		writel_relaxed(cb->mair[0], cb_base + ARM_SMMU_CB_S1_MAIR0);
		writel_relaxed(cb->mair[1], cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= SCTLR_E;

	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
		smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N              S1
	 *     S1             S1+S2            S1
	 *     S1               S2             S2
	 *     S1               S1             S1
	 *     N                N              N
	 *     N              S1+S2            S2
	 *     N                S2             S2
	 *     N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
	    !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
	    (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		} else {
			fmt = ARM_V7S;
			ias = min(ias, 32UL);
			oas = min(oas, 32UL);
		}
		smmu_domain->tlb_ops = &arm_smmu_s1_tlb_ops;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		if (smmu->version == ARM_SMMU_V2)
			smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v2;
		else
			smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v1;
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}
	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (ret < 0)
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
		cfg->vmid = cfg->cbndx + 1 + smmu->cavium_id_base;
	else
		cfg->asid = cfg->cbndx + smmu->cavium_id_base;

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= smmu_domain->tlb_ops,
		.iommu_dev	= smmu->dev,
	};

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA;

	if (smmu_domain->non_strict)
		pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1UL << ias) - 1;
	domain->geometry.force_aperture = true;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
			       IRQF_SHARED, "arm-smmu-context-fault", domain);
	if (ret < 0) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	int ret, irq;

	if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
		return;

	ret = arm_smmu_rpm_get(smmu);
	if (ret < 0)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	smmu->cbs[cfg->cbndx].cfg = NULL;
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);

	arm_smmu_rpm_put(smmu);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED &&
	    type != IOMMU_DOMAIN_DMA &&
	    type != IOMMU_DOMAIN_IDENTITY)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
	    iommu_get_dma_cookie(&smmu_domain->domain))) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->cb_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

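/*
 * Write the cached SMR/S2CR state for a stream-map entry out to the
 * hardware registers.
 */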
static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_smr *smr = smmu->smrs + idx;
	u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;

	if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
		reg |= SMR_VALID;
	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
}

static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
	u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
		  (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
		  (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;

	if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
	    smmu->smrs[idx].valid)
		reg |= S2CR_EXIDVALID;
	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
}

static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
{
	arm_smmu_write_s2cr(smmu, idx);
	if (smmu->smrs)
		arm_smmu_write_smr(smmu, idx);
}

/*
 * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
 * should be called after sCR0 is written.
 */
static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 smr;

	if (!smmu->smrs)
		return;

	/*
	 * SMR.ID bits may not be preserved if the corresponding MASK
	 * bits are set, so check each one separately. We can reject
	 * masters later if they try to claim IDs outside these masks.
	 */
	smr = smmu->streamid_mask << SMR_ID_SHIFT;
	writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
	smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
	smmu->streamid_mask = smr >> SMR_ID_SHIFT;

	smr = smmu->streamid_mask << SMR_MASK_SHIFT;
	writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
	smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
	smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
}

static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
{
	struct arm_smmu_smr *smrs = smmu->smrs;
	int i, free_idx = -ENOSPC;

	/* Stream indexing is blissfully easy */
	if (!smrs)
		return id;

	/* Validating SMRs is... less so */
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		if (!smrs[i].valid) {
			/*
			 * Note the first free entry we come across, which
			 * we'll claim in the end if nothing else matches.
			 */
			if (free_idx < 0)
				free_idx = i;
			continue;
		}
		/*
		 * If the new entry is _entirely_ matched by an existing entry,
		 * then reuse that, with the guarantee that there also cannot
		 * be any subsequent conflicting entries. In normal use we'd
		 * expect simply identical entries for this case, but there's
		 * no harm in accommodating the generalisation.
		 */
		if ((mask & smrs[i].mask) == mask &&
		    !((id ^ smrs[i].id) & ~smrs[i].mask))
			return i;
		/*
		 * If the new entry has any other overlap with an existing one,
		 * though, then there always exists at least one stream ID
		 * which would cause a conflict, and we can't allow that risk.
		 */
		if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
			return -EINVAL;
	}

	return free_idx;
}

static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
{
	if (--smmu->s2crs[idx].count)
		return false;

	smmu->s2crs[idx] = s2cr_init_val;
	if (smmu->smrs)
		smmu->smrs[idx].valid = false;

	return true;
}

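/*
 * Allocate and program stream-map entries (SMRs and S2CRs) for every
 * stream ID listed in the master's fwspec.
 */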
static int arm_smmu_master_alloc_smes(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
	struct arm_smmu_device *smmu = cfg->smmu;
	struct arm_smmu_smr *smrs = smmu->smrs;
	struct iommu_group *group;
	int i, idx, ret;

	mutex_lock(&smmu->stream_map_mutex);
	/* Figure out a viable stream map entry allocation */
	for_each_cfg_sme(fwspec, i, idx) {
		u16 sid = fwspec->ids[i];
		u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;

		if (idx != INVALID_SMENDX) {
			ret = -EEXIST;
			goto out_err;
		}

		ret = arm_smmu_find_sme(smmu, sid, mask);
		if (ret < 0)
			goto out_err;

		idx = ret;
		if (smrs && smmu->s2crs[idx].count == 0) {
			smrs[idx].id = sid;
			smrs[idx].mask = mask;
			smrs[idx].valid = true;
		}
		smmu->s2crs[idx].count++;
		cfg->smendx[i] = (s16)idx;
	}

	group = iommu_group_get_for_dev(dev);
	if (!group)
		group = ERR_PTR(-ENOMEM);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto out_err;
	}
	iommu_group_put(group);

	/* It worked! Now, poke the actual hardware */
	for_each_cfg_sme(fwspec, i, idx) {
		arm_smmu_write_sme(smmu, idx);
		smmu->s2crs[idx].group = group;
	}

	mutex_unlock(&smmu->stream_map_mutex);
	return 0;

out_err:
	while (i--) {
		arm_smmu_free_sme(smmu, cfg->smendx[i]);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
	return ret;
}

Robin Murphyadfec2e2016-09-12 17:13:55 +01001197static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001198{
Robin Murphyadfec2e2016-09-12 17:13:55 +01001199 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
1200 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphyd3097e32016-09-12 17:13:53 +01001201 int i, idx;
Will Deacon43b412b2014-07-15 11:22:24 +01001202
Robin Murphy588888a2016-09-12 17:13:54 +01001203 mutex_lock(&smmu->stream_map_mutex);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001204 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy588888a2016-09-12 17:13:54 +01001205 if (arm_smmu_free_sme(smmu, idx))
1206 arm_smmu_write_sme(smmu, idx);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001207 cfg->smendx[i] = INVALID_SMENDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001208 }
Robin Murphy588888a2016-09-12 17:13:54 +01001209 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001210}
1211
Will Deacon45ae7cf2013-06-24 18:31:25 +01001212static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Robin Murphyadfec2e2016-09-12 17:13:55 +01001213 struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001214{
Will Deacon44680ee2014-06-25 11:29:12 +01001215 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001216 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001217 u8 cbndx = smmu_domain->cfg.cbndx;
Will Deacon61bc6712017-01-06 16:56:03 +00001218 enum arm_smmu_s2cr_type type;
Robin Murphy588888a2016-09-12 17:13:54 +01001219 int i, idx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001220
Will Deacon61bc6712017-01-06 16:56:03 +00001221 if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
1222 type = S2CR_TYPE_BYPASS;
1223 else
1224 type = S2CR_TYPE_TRANS;
1225
Robin Murphyadfec2e2016-09-12 17:13:55 +01001226 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy8e8b2032016-09-12 17:13:50 +01001227 if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
Robin Murphy588888a2016-09-12 17:13:54 +01001228 continue;
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001229
Robin Murphy8e8b2032016-09-12 17:13:50 +01001230 s2cr[idx].type = type;
Sricharan Re1989802017-01-06 18:58:15 +05301231 s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001232 s2cr[idx].cbndx = cbndx;
1233 arm_smmu_write_s2cr(smmu, idx);
Will Deacon43b412b2014-07-15 11:22:24 +01001234 }
Robin Murphy8e8b2032016-09-12 17:13:50 +01001235 return 0;
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001236}
1237
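/*
 * Attaching a device resumes the SMMU via runtime PM, finalises the
 * domain's context bank if this is the first attach, rejects masters
 * whose fwspec points at a different SMMU instance, and finally points
 * the relevant S2CRs at the context bank (or bypass for identity domains).
 */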
Will Deacon45ae7cf2013-06-24 18:31:25 +01001238static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1239{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001240 int ret;
Joerg Roedel9b468f72018-11-29 14:01:00 +01001241 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001242 struct arm_smmu_device *smmu;
Joerg Roedel1d672632015-03-26 13:43:10 +01001243 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001244
Robin Murphyadfec2e2016-09-12 17:13:55 +01001245 if (!fwspec || fwspec->ops != &arm_smmu_ops) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001246 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
1247 return -ENXIO;
1248 }
1249
Robin Murphyfba4f8e2016-10-17 12:06:21 +01001250 /*
1251 * FIXME: The arch/arm DMA API code tries to attach devices to its own
1252 * domains between of_xlate() and add_device() - we have no way to cope
1253 * with that, so until ARM gets converted to rely on groups and default
1254 * domains, just say no (but more politely than by dereferencing NULL).
1255 * This should be at least a WARN_ON once that's sorted.
1256 */
1257 if (!fwspec->iommu_priv)
1258 return -ENODEV;
1259
Robin Murphyadfec2e2016-09-12 17:13:55 +01001260 smmu = fwspec_smmu(fwspec);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301261
1262 ret = arm_smmu_rpm_get(smmu);
1263 if (ret < 0)
1264 return ret;
1265
Will Deacon518f7132014-11-14 17:17:54 +00001266 /* Ensure that the domain is finalised */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001267 ret = arm_smmu_init_domain_context(domain, smmu);
Arnd Bergmann287980e2016-05-27 23:23:25 +02001268 if (ret < 0)
Sricharan Rd4a44f02018-12-04 11:52:10 +05301269 goto rpm_put;
Will Deacon518f7132014-11-14 17:17:54 +00001270
Will Deacon45ae7cf2013-06-24 18:31:25 +01001271 /*
Will Deacon44680ee2014-06-25 11:29:12 +01001272 * Sanity check the domain. We don't support domains across
1273 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01001274 */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001275 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001276 dev_err(dev,
1277 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Robin Murphyadfec2e2016-09-12 17:13:55 +01001278 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
Sricharan Rd4a44f02018-12-04 11:52:10 +05301279 ret = -EINVAL;
1280 goto rpm_put;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001281 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001282
1283 /* Looks ok, so add the device to the domain */
Sricharan Rd4a44f02018-12-04 11:52:10 +05301284 ret = arm_smmu_domain_add_master(smmu_domain, fwspec);
1285
1286rpm_put:
1287 arm_smmu_rpm_put(smmu);
1288 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001289}
1290
Will Deacon45ae7cf2013-06-24 18:31:25 +01001291static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00001292 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001293{
Robin Murphy523d7422017-06-22 16:53:56 +01001294 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301295 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1296 int ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001297
Will Deacon518f7132014-11-14 17:17:54 +00001298 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001299 return -ENODEV;
1300
Sricharan Rd4a44f02018-12-04 11:52:10 +05301301 arm_smmu_rpm_get(smmu);
1302 ret = ops->map(ops, iova, paddr, size, prot);
1303 arm_smmu_rpm_put(smmu);
1304
1305 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001306}
1307
1308static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
1309 size_t size)
1310{
Robin Murphy523d7422017-06-22 16:53:56 +01001311 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301312 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1313 size_t ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001314
Will Deacon518f7132014-11-14 17:17:54 +00001315 if (!ops)
1316 return 0;
1317
Sricharan Rd4a44f02018-12-04 11:52:10 +05301318 arm_smmu_rpm_get(smmu);
1319 ret = ops->unmap(ops, iova, size);
1320 arm_smmu_rpm_put(smmu);
1321
1322 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001323}
1324
Robin Murphy44f68762018-09-20 17:10:27 +01001325static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
1326{
1327 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301328 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy44f68762018-09-20 17:10:27 +01001329
Sricharan Rd4a44f02018-12-04 11:52:10 +05301330 if (smmu_domain->tlb_ops) {
1331 arm_smmu_rpm_get(smmu);
Robin Murphy44f68762018-09-20 17:10:27 +01001332 smmu_domain->tlb_ops->tlb_flush_all(smmu_domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301333 arm_smmu_rpm_put(smmu);
1334 }
Robin Murphy44f68762018-09-20 17:10:27 +01001335}
1336
Robin Murphy32b12442017-09-28 15:55:01 +01001337static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
1338{
1339 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301340 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy32b12442017-09-28 15:55:01 +01001341
Sricharan Rd4a44f02018-12-04 11:52:10 +05301342 if (smmu_domain->tlb_ops) {
1343 arm_smmu_rpm_get(smmu);
Robin Murphy32b12442017-09-28 15:55:01 +01001344 smmu_domain->tlb_ops->tlb_sync(smmu_domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301345 arm_smmu_rpm_put(smmu);
1346 }
Robin Murphy32b12442017-09-28 15:55:01 +01001347}
1348
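/*
 * Hardware-assisted iova-to-phys: write the page-aligned VA to ATS1PR,
 * poll ATSR until the translation completes, then read the result from
 * PAR. If the poll times out we fall back to a software walk of the
 * io-pgtable; a reported translation fault yields a physical address of 0.
 */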
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001349static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
1350 dma_addr_t iova)
1351{
Joerg Roedel1d672632015-03-26 13:43:10 +01001352 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001353 struct arm_smmu_device *smmu = smmu_domain->smmu;
1354 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1355	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1356 struct device *dev = smmu->dev;
1357 void __iomem *cb_base;
1358 u32 tmp;
1359 u64 phys;
Robin Murphy523d7422017-06-22 16:53:56 +01001360 unsigned long va, flags;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301361 int ret;
1362
1363 ret = arm_smmu_rpm_get(smmu);
1364 if (ret < 0)
1365 return 0;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001366
Robin Murphy452107c2017-03-30 17:56:30 +01001367 cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001368
Robin Murphy523d7422017-06-22 16:53:56 +01001369 spin_lock_irqsave(&smmu_domain->cb_lock, flags);
Robin Murphy661d9622015-05-27 17:09:34 +01001370 /* ATS1 registers can only be written atomically */
1371 va = iova & ~0xfffUL;
Robin Murphy661d9622015-05-27 17:09:34 +01001372 if (smmu->version == ARM_SMMU_V2)
Robin Murphyf9a05f02016-04-13 18:13:01 +01001373 smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
1374 else /* Register is only 32-bit in v1 */
Robin Murphy661d9622015-05-27 17:09:34 +01001375 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001376
1377 if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
1378 !(tmp & ATSR_ACTIVE), 5, 50)) {
Robin Murphy523d7422017-06-22 16:53:56 +01001379 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001380 dev_err(dev,
Fabio Estevam077124c2015-08-18 17:12:24 +01001381 "iova to phys timed out on %pad. Falling back to software table walk.\n",
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001382 &iova);
		arm_smmu_rpm_put(smmu);
1383		return ops->iova_to_phys(ops, iova);
1384 }
1385
Robin Murphyf9a05f02016-04-13 18:13:01 +01001386 phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
Robin Murphy523d7422017-06-22 16:53:56 +01001387 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001388 if (phys & CB_PAR_F) {
1389 dev_err(dev, "translation fault!\n");
1390 dev_err(dev, "PAR = 0x%llx\n", phys);
		arm_smmu_rpm_put(smmu);
1391		return 0;
1392 }
1393
Sricharan Rd4a44f02018-12-04 11:52:10 +05301394 arm_smmu_rpm_put(smmu);
1395
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001396 return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
1397}
1398
Will Deacon45ae7cf2013-06-24 18:31:25 +01001399static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001400 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001401{
Joerg Roedel1d672632015-03-26 13:43:10 +01001402 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Robin Murphy523d7422017-06-22 16:53:56 +01001403 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001404
Sunil Gouthambdf95922017-04-25 15:27:52 +05301405 if (domain->type == IOMMU_DOMAIN_IDENTITY)
1406 return iova;
1407
Will Deacon518f7132014-11-14 17:17:54 +00001408 if (!ops)
Will Deacona44a97912013-11-07 18:47:50 +00001409 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001410
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001411 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
Robin Murphy523d7422017-06-22 16:53:56 +01001412 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
1413 return arm_smmu_iova_to_phys_hard(domain, iova);
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001414
Robin Murphy523d7422017-06-22 16:53:56 +01001415 return ops->iova_to_phys(ops, iova);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001416}
1417
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001418static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001419{
Will Deacond0948942014-06-24 17:30:10 +01001420 switch (cap) {
1421 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001422 /*
1423 * Return true here as the SMMU can always send out coherent
1424 * requests.
1425 */
1426 return true;
Antonios Motakis0029a8d2014-10-13 14:06:18 +01001427 case IOMMU_CAP_NOEXEC:
1428 return true;
Will Deacond0948942014-06-24 17:30:10 +01001429 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001430 return false;
Will Deacond0948942014-06-24 17:30:10 +01001431 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001432}
Will Deacon45ae7cf2013-06-24 18:31:25 +01001433
Suzuki K Poulose92ce7e82019-06-14 18:54:00 +01001434static int arm_smmu_match_node(struct device *dev, const void *data)
Robin Murphy021bb842016-09-14 15:26:46 +01001435{
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001436 return dev->fwnode == data;
Robin Murphy021bb842016-09-14 15:26:46 +01001437}
1438
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001439static
1440struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
Robin Murphy021bb842016-09-14 15:26:46 +01001441{
1442 struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001443 fwnode, arm_smmu_match_node);
Robin Murphy021bb842016-09-14 15:26:46 +01001444 put_device(dev);
1445 return dev ? dev_get_drvdata(dev) : NULL;
1446}
1447
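/*
 * add_device resolves the owning SMMU either from the legacy "mmu-masters"
 * binding or from the firmware-supplied fwspec, validates each stream ID
 * and SMR mask against the limits probed from the hardware, and allocates
 * a per-master cfg holding one smendx slot per ID before claiming SMEs.
 */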
Will Deacon03edb222015-01-19 14:27:33 +00001448static int arm_smmu_add_device(struct device *dev)
1449{
Robin Murphyadfec2e2016-09-12 17:13:55 +01001450 struct arm_smmu_device *smmu;
Robin Murphyf80cd882016-09-14 15:21:39 +01001451 struct arm_smmu_master_cfg *cfg;
Joerg Roedel9b468f72018-11-29 14:01:00 +01001452 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphyf80cd882016-09-14 15:21:39 +01001453 int i, ret;
1454
Robin Murphy021bb842016-09-14 15:26:46 +01001455 if (using_legacy_binding) {
1456 ret = arm_smmu_register_legacy_master(dev, &smmu);
Artem Savkova7990c62017-08-08 12:26:02 +02001457
1458 /*
1459	 * If dev->iommu_fwspec is initially NULL, arm_smmu_register_legacy_master()
1460 * will allocate/initialise a new one. Thus we need to update fwspec for
1461 * later use.
1462 */
Joerg Roedel9b468f72018-11-29 14:01:00 +01001463 fwspec = dev_iommu_fwspec_get(dev);
Robin Murphy021bb842016-09-14 15:26:46 +01001464 if (ret)
1465 goto out_free;
Robin Murphy3c117b52016-11-02 17:31:32 +00001466 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001467 smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
Robin Murphy021bb842016-09-14 15:26:46 +01001468 } else {
1469 return -ENODEV;
1470 }
Robin Murphyf80cd882016-09-14 15:21:39 +01001471
1472 ret = -EINVAL;
Robin Murphyadfec2e2016-09-12 17:13:55 +01001473 for (i = 0; i < fwspec->num_ids; i++) {
1474 u16 sid = fwspec->ids[i];
Robin Murphy021bb842016-09-14 15:26:46 +01001475 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
Robin Murphyf80cd882016-09-14 15:21:39 +01001476
Robin Murphyadfec2e2016-09-12 17:13:55 +01001477 if (sid & ~smmu->streamid_mask) {
Robin Murphyf80cd882016-09-14 15:21:39 +01001478 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
Robin Murphy021bb842016-09-14 15:26:46 +01001479 sid, smmu->streamid_mask);
1480 goto out_free;
1481 }
1482 if (mask & ~smmu->smr_mask_mask) {
1483 dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
Peng Fan6323f472017-04-21 17:03:36 +08001484 mask, smmu->smr_mask_mask);
Robin Murphyf80cd882016-09-14 15:21:39 +01001485 goto out_free;
1486 }
Robin Murphyf80cd882016-09-14 15:21:39 +01001487 }
Will Deacon03edb222015-01-19 14:27:33 +00001488
Robin Murphyadfec2e2016-09-12 17:13:55 +01001489 ret = -ENOMEM;
1490 cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
1491 GFP_KERNEL);
1492 if (!cfg)
1493 goto out_free;
1494
1495 cfg->smmu = smmu;
1496 fwspec->iommu_priv = cfg;
1497 while (i--)
1498 cfg->smendx[i] = INVALID_SMENDX;
1499
Sricharan Rd4a44f02018-12-04 11:52:10 +05301500 ret = arm_smmu_rpm_get(smmu);
1501 if (ret < 0)
1502 goto out_cfg_free;
1503
Robin Murphy588888a2016-09-12 17:13:54 +01001504 ret = arm_smmu_master_alloc_smes(dev);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301505 arm_smmu_rpm_put(smmu);
1506
Robin Murphyadfec2e2016-09-12 17:13:55 +01001507 if (ret)
Vivek Gautamc54451a2017-07-06 15:07:00 +05301508 goto out_cfg_free;
Robin Murphyadfec2e2016-09-12 17:13:55 +01001509
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001510 iommu_device_link(&smmu->iommu, dev);
1511
Sricharan R655e3642018-12-04 11:52:11 +05301512 device_link_add(dev, smmu->dev,
1513 DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER);
1514
Robin Murphyadfec2e2016-09-12 17:13:55 +01001515 return 0;
Robin Murphyf80cd882016-09-14 15:21:39 +01001516
Vivek Gautamc54451a2017-07-06 15:07:00 +05301517out_cfg_free:
1518 kfree(cfg);
Robin Murphyf80cd882016-09-14 15:21:39 +01001519out_free:
Robin Murphyadfec2e2016-09-12 17:13:55 +01001520 iommu_fwspec_free(dev);
Robin Murphyf80cd882016-09-14 15:21:39 +01001521 return ret;
Will Deacon03edb222015-01-19 14:27:33 +00001522}
1523
Will Deacon45ae7cf2013-06-24 18:31:25 +01001524static void arm_smmu_remove_device(struct device *dev)
1525{
Joerg Roedel9b468f72018-11-29 14:01:00 +01001526 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001527 struct arm_smmu_master_cfg *cfg;
1528 struct arm_smmu_device *smmu;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301529 int ret;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001530
Robin Murphyadfec2e2016-09-12 17:13:55 +01001531 if (!fwspec || fwspec->ops != &arm_smmu_ops)
Robin Murphyf80cd882016-09-14 15:21:39 +01001532 return;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001533
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001534 cfg = fwspec->iommu_priv;
1535 smmu = cfg->smmu;
1536
Sricharan Rd4a44f02018-12-04 11:52:10 +05301537 ret = arm_smmu_rpm_get(smmu);
1538 if (ret < 0)
1539 return;
1540
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001541 iommu_device_unlink(&smmu->iommu, dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001542 arm_smmu_master_free_smes(fwspec);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301543
1544 arm_smmu_rpm_put(smmu);
1545
Antonios Motakis5fc63a72013-10-18 16:08:29 +01001546 iommu_group_remove_device(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001547 kfree(fwspec->iommu_priv);
1548 iommu_fwspec_free(dev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001549}
1550
Joerg Roedelaf659932015-10-21 23:51:41 +02001551static struct iommu_group *arm_smmu_device_group(struct device *dev)
1552{
Joerg Roedel9b468f72018-11-29 14:01:00 +01001553 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001554 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
Robin Murphy588888a2016-09-12 17:13:54 +01001555 struct iommu_group *group = NULL;
1556 int i, idx;
1557
Robin Murphyadfec2e2016-09-12 17:13:55 +01001558 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy588888a2016-09-12 17:13:54 +01001559 if (group && smmu->s2crs[idx].group &&
1560 group != smmu->s2crs[idx].group)
1561 return ERR_PTR(-EINVAL);
1562
1563 group = smmu->s2crs[idx].group;
1564 }
1565
1566 if (group)
Robin Murphye1b44cb2016-11-11 17:59:22 +00001567 return iommu_group_ref_get(group);
Joerg Roedelaf659932015-10-21 23:51:41 +02001568
1569 if (dev_is_pci(dev))
1570 group = pci_device_group(dev);
Nipun Guptaeab03e22018-09-10 19:19:18 +05301571 else if (dev_is_fsl_mc(dev))
1572 group = fsl_mc_device_group(dev);
Joerg Roedelaf659932015-10-21 23:51:41 +02001573 else
1574 group = generic_device_group(dev);
1575
Joerg Roedelaf659932015-10-21 23:51:41 +02001576 return group;
1577}
1578
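/*
 * Only two domain attributes are handled: DOMAIN_ATTR_NESTING for
 * unmanaged domains (stage-1 vs nested translation) and
 * DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE for DMA domains (non-strict TLB
 * invalidation); anything else returns -ENODEV or -EINVAL.
 */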
Will Deaconc752ce42014-06-25 22:46:31 +01001579static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
1580 enum iommu_attr attr, void *data)
1581{
Joerg Roedel1d672632015-03-26 13:43:10 +01001582 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001583
Robin Murphy44f68762018-09-20 17:10:27 +01001584	switch (domain->type) {
1585 case IOMMU_DOMAIN_UNMANAGED:
1586 switch (attr) {
1587 case DOMAIN_ATTR_NESTING:
1588 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
1589 return 0;
1590 default:
1591 return -ENODEV;
1592 }
1593 break;
1594 case IOMMU_DOMAIN_DMA:
1595 switch (attr) {
1596 case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
1597 *(int *)data = smmu_domain->non_strict;
1598 return 0;
1599 default:
1600 return -ENODEV;
1601 }
1602 break;
Will Deaconc752ce42014-06-25 22:46:31 +01001603 default:
Robin Murphy44f68762018-09-20 17:10:27 +01001604 return -EINVAL;
Will Deaconc752ce42014-06-25 22:46:31 +01001605 }
1606}
1607
1608static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
1609 enum iommu_attr attr, void *data)
1610{
Will Deacon518f7132014-11-14 17:17:54 +00001611 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01001612 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001613
Will Deacon518f7132014-11-14 17:17:54 +00001614 mutex_lock(&smmu_domain->init_mutex);
1615
Robin Murphy44f68762018-09-20 17:10:27 +01001616	switch (domain->type) {
1617 case IOMMU_DOMAIN_UNMANAGED:
1618 switch (attr) {
1619 case DOMAIN_ATTR_NESTING:
1620 if (smmu_domain->smmu) {
1621 ret = -EPERM;
1622 goto out_unlock;
1623 }
1624
1625 if (*(int *)data)
1626 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1627 else
1628 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1629 break;
1630 default:
1631 ret = -ENODEV;
Will Deacon518f7132014-11-14 17:17:54 +00001632 }
Robin Murphy44f68762018-09-20 17:10:27 +01001633 break;
1634 case IOMMU_DOMAIN_DMA:
1635 switch (attr) {
1636 case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
1637 smmu_domain->non_strict = *(int *)data;
1638 break;
1639 default:
1640 ret = -ENODEV;
1641 }
Will Deacon518f7132014-11-14 17:17:54 +00001642 break;
Will Deaconc752ce42014-06-25 22:46:31 +01001643 default:
Robin Murphy44f68762018-09-20 17:10:27 +01001644 ret = -EINVAL;
Will Deaconc752ce42014-06-25 22:46:31 +01001645 }
Will Deacon518f7132014-11-14 17:17:54 +00001646out_unlock:
1647 mutex_unlock(&smmu_domain->init_mutex);
1648 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01001649}
1650
Robin Murphy021bb842016-09-14 15:26:46 +01001651static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
1652{
Robin Murphy56fbf602017-03-31 12:03:33 +01001653 u32 mask, fwid = 0;
Robin Murphy021bb842016-09-14 15:26:46 +01001654
1655 if (args->args_count > 0)
1656 fwid |= (u16)args->args[0];
1657
1658 if (args->args_count > 1)
1659 fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
Robin Murphy56fbf602017-03-31 12:03:33 +01001660 else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
1661 fwid |= (u16)mask << SMR_MASK_SHIFT;
Robin Murphy021bb842016-09-14 15:26:46 +01001662
1663 return iommu_fwspec_add_ids(dev, &fwid, 1);
1664}
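/*
 * Illustrative example (not taken from any real DT) of the generic binding
 * parsed above: with #iommu-cells = <2>, a consumer entry such as
 *
 *	iommus = <&smmu 0x400 0x7f>;
 *
 * yields fwid = 0x400 | (0x7f << SMR_MASK_SHIFT). With a single cell, the
 * optional "stream-match-mask" property on the SMMU node supplies the mask
 * instead.
 */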
1665
Eric Augerf3ebee82017-01-19 20:57:55 +00001666static void arm_smmu_get_resv_regions(struct device *dev,
1667 struct list_head *head)
1668{
1669 struct iommu_resv_region *region;
1670 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
1671
1672 region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
Robin Murphy9d3a4de2017-03-16 17:00:16 +00001673 prot, IOMMU_RESV_SW_MSI);
Eric Augerf3ebee82017-01-19 20:57:55 +00001674 if (!region)
1675 return;
1676
1677 list_add_tail(&region->list, head);
Robin Murphy273df962017-03-16 17:00:19 +00001678
1679 iommu_dma_get_resv_regions(dev, head);
Eric Augerf3ebee82017-01-19 20:57:55 +00001680}
1681
1682static void arm_smmu_put_resv_regions(struct device *dev,
1683 struct list_head *head)
1684{
1685 struct iommu_resv_region *entry, *next;
1686
1687 list_for_each_entry_safe(entry, next, head, list)
1688 kfree(entry);
1689}
1690
Will Deacon518f7132014-11-14 17:17:54 +00001691static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01001692 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01001693 .domain_alloc = arm_smmu_domain_alloc,
1694 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01001695 .attach_dev = arm_smmu_attach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01001696 .map = arm_smmu_map,
1697 .unmap = arm_smmu_unmap,
Robin Murphy44f68762018-09-20 17:10:27 +01001698 .flush_iotlb_all = arm_smmu_flush_iotlb_all,
Robin Murphy32b12442017-09-28 15:55:01 +01001699 .iotlb_sync = arm_smmu_iotlb_sync,
Will Deaconc752ce42014-06-25 22:46:31 +01001700 .iova_to_phys = arm_smmu_iova_to_phys,
1701 .add_device = arm_smmu_add_device,
1702 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02001703 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01001704 .domain_get_attr = arm_smmu_domain_get_attr,
1705 .domain_set_attr = arm_smmu_domain_set_attr,
Robin Murphy021bb842016-09-14 15:26:46 +01001706 .of_xlate = arm_smmu_of_xlate,
Eric Augerf3ebee82017-01-19 20:57:55 +00001707 .get_resv_regions = arm_smmu_get_resv_regions,
1708 .put_resv_regions = arm_smmu_put_resv_regions,
Will Deacon518f7132014-11-14 17:17:54 +00001709 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001710};
1711
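/*
 * Full device reset: clear the global fault status, rewrite every SMR/S2CR
 * from the software copy (invalid/bypass by default), disable and clear
 * each context bank, invalidate the TLBs, then build up sCR0 (fault
 * reporting on, TLB broadcast off, unmatched streams faulted or bypassed
 * according to the disable_bypass parameter) and commit it after a global
 * TLB sync.
 */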
1712static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
1713{
1714 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001715 int i;
Peng Fan3ca37122016-05-03 21:50:30 +08001716 u32 reg, major;
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001717
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001718 /* clear global FSR */
1719 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
1720 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001721
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001722 /*
1723 * Reset stream mapping groups: Initial values mark all SMRn as
1724 * invalid and all S2CRn as bypass unless overridden.
1725 */
Robin Murphy8e8b2032016-09-12 17:13:50 +01001726 for (i = 0; i < smmu->num_mapping_groups; ++i)
1727 arm_smmu_write_sme(smmu, i);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001728
Nipun Gupta6eb18d42016-11-04 15:25:23 +05301729 if (smmu->model == ARM_MMU500) {
1730 /*
1731 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
1732 * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
1733 * bit is only present in MMU-500r2 onwards.
1734 */
1735 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
1736 major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
Peng Fan3ca37122016-05-03 21:50:30 +08001737 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
Nipun Gupta6eb18d42016-11-04 15:25:23 +05301738 if (major >= 2)
1739 reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
1740 /*
1741 * Allow unmatched Stream IDs to allocate bypass
1742 * TLB entries for reduced latency.
1743 */
Feng Kan74f55d32017-10-11 15:08:39 -07001744 reg |= ARM_MMU500_ACR_SMTNMB_TLBEN | ARM_MMU500_ACR_S2CRB_TLBEN;
Peng Fan3ca37122016-05-03 21:50:30 +08001745 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
1746 }
1747
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001748 /* Make sure all context banks are disabled and clear CB_FSR */
1749 for (i = 0; i < smmu->num_context_banks; ++i) {
Robin Murphy90df3732017-08-08 14:56:14 +01001750 void __iomem *cb_base = ARM_SMMU_CB(smmu, i);
1751
1752 arm_smmu_write_context_bank(smmu, i);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001753 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01001754 /*
1755 * Disable MMU-500's not-particularly-beneficial next-page
1756 * prefetcher for the sake of errata #841119 and #826419.
1757 */
1758 if (smmu->model == ARM_MMU500) {
1759 reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
1760 reg &= ~ARM_MMU500_ACTLR_CPRE;
1761 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
1762 }
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001763 }
Will Deacon1463fe42013-07-31 19:21:27 +01001764
Will Deacon45ae7cf2013-06-24 18:31:25 +01001765 /* Invalidate the TLB, just in case */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001766 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
1767 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
1768
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001769 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001770
Will Deacon45ae7cf2013-06-24 18:31:25 +01001771 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001772 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001773
1774 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001775 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001776
Robin Murphy25a1c962016-02-10 14:25:33 +00001777 /* Enable client access, handling unmatched streams as appropriate */
1778 reg &= ~sCR0_CLIENTPD;
1779 if (disable_bypass)
1780 reg |= sCR0_USFCFG;
1781 else
1782 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001783
1784 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001785 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001786
1787 /* Don't upgrade barriers */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001788 reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001789
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001790 if (smmu->features & ARM_SMMU_FEAT_VMID16)
1791 reg |= sCR0_VMID16EN;
1792
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001793 if (smmu->features & ARM_SMMU_FEAT_EXIDS)
1794 reg |= sCR0_EXIDENABLE;
1795
Will Deacon45ae7cf2013-06-24 18:31:25 +01001796 /* Push the button */
Robin Murphy11febfc2017-03-30 17:56:31 +01001797 arm_smmu_tlb_sync_global(smmu);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001798 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001799}
1800
1801static int arm_smmu_id_size_to_bits(int size)
1802{
1803 switch (size) {
1804 case 0:
1805 return 32;
1806 case 1:
1807 return 36;
1808 case 2:
1809 return 40;
1810 case 3:
1811 return 42;
1812 case 4:
1813 return 44;
1814 case 5:
1815 default:
1816 return 48;
1817 }
1818}
1819
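/*
 * Probe the ID registers: ID0 gives the supported translation stages,
 * stream matching/indexing capabilities and stream ID width, ID1 the page
 * shift and number of context banks, and ID2 the input/output address
 * sizes and supported page-table formats, from which pgsize_bitmap is
 * derived.
 */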
1820static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1821{
1822 unsigned long size;
1823 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1824 u32 id;
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001825 bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001826 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001827
1828 dev_notice(smmu->dev, "probing hardware configuration...\n");
Robin Murphyb7862e32016-04-13 18:13:03 +01001829 dev_notice(smmu->dev, "SMMUv%d with:\n",
1830 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001831
1832 /* ID0 */
1833 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01001834
1835 /* Restrict available stages based on module parameter */
1836 if (force_stage == 1)
1837 id &= ~(ID0_S2TS | ID0_NTS);
1838 else if (force_stage == 2)
1839 id &= ~(ID0_S1TS | ID0_NTS);
1840
Will Deacon45ae7cf2013-06-24 18:31:25 +01001841 if (id & ID0_S1TS) {
1842 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
1843 dev_notice(smmu->dev, "\tstage 1 translation\n");
1844 }
1845
1846 if (id & ID0_S2TS) {
1847 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
1848 dev_notice(smmu->dev, "\tstage 2 translation\n");
1849 }
1850
1851 if (id & ID0_NTS) {
1852 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
1853 dev_notice(smmu->dev, "\tnested translation\n");
1854 }
1855
1856 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01001857 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001858 dev_err(smmu->dev, "\tno translation support!\n");
1859 return -ENODEV;
1860 }
1861
Robin Murphyb7862e32016-04-13 18:13:03 +01001862 if ((id & ID0_S1TS) &&
1863 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001864 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
1865 dev_notice(smmu->dev, "\taddress translation ops\n");
1866 }
1867
Robin Murphybae2c2d2015-07-29 19:46:05 +01001868 /*
1869 * In order for DMA API calls to work properly, we must defer to what
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001870 * the FW says about coherency, regardless of what the hardware claims.
Robin Murphybae2c2d2015-07-29 19:46:05 +01001871 * Fortunately, this also opens up a workaround for systems where the
1872 * ID register value has ended up configured incorrectly.
1873 */
Robin Murphybae2c2d2015-07-29 19:46:05 +01001874 cttw_reg = !!(id & ID0_CTTW);
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001875 if (cttw_fw || cttw_reg)
Robin Murphybae2c2d2015-07-29 19:46:05 +01001876 dev_notice(smmu->dev, "\t%scoherent table walk\n",
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001877 cttw_fw ? "" : "non-");
1878 if (cttw_fw != cttw_reg)
Robin Murphybae2c2d2015-07-29 19:46:05 +01001879 dev_notice(smmu->dev,
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001880 "\t(IDR0.CTTW overridden by FW configuration)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01001881
Robin Murphy21174242016-09-12 17:13:48 +01001882 /* Max. number of entries we have for stream matching/indexing */
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001883 if (smmu->version == ARM_SMMU_V2 && id & ID0_EXIDS) {
1884 smmu->features |= ARM_SMMU_FEAT_EXIDS;
1885 size = 1 << 16;
1886 } else {
1887 size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
1888 }
Robin Murphy21174242016-09-12 17:13:48 +01001889 smmu->streamid_mask = size - 1;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001890 if (id & ID0_SMS) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001891 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
Robin Murphy21174242016-09-12 17:13:48 +01001892 size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
1893 if (size == 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001894 dev_err(smmu->dev,
1895 "stream-matching supported, but no SMRs present!\n");
1896 return -ENODEV;
1897 }
1898
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001899 /* Zero-initialised to mark as invalid */
1900 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
1901 GFP_KERNEL);
1902 if (!smmu->smrs)
1903 return -ENOMEM;
1904
Will Deacon45ae7cf2013-06-24 18:31:25 +01001905 dev_notice(smmu->dev,
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001906			   "\tstream matching with %lu register groups\n", size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001907 }
Robin Murphy8e8b2032016-09-12 17:13:50 +01001908 /* s2cr->type == 0 means translation, so initialise explicitly */
1909 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
1910 GFP_KERNEL);
1911 if (!smmu->s2crs)
1912 return -ENOMEM;
1913 for (i = 0; i < size; i++)
1914 smmu->s2crs[i] = s2cr_init_val;
1915
Robin Murphy21174242016-09-12 17:13:48 +01001916 smmu->num_mapping_groups = size;
Robin Murphy588888a2016-09-12 17:13:54 +01001917 mutex_init(&smmu->stream_map_mutex);
Will Deacon8e517e72017-07-06 15:55:48 +01001918 spin_lock_init(&smmu->global_sync_lock);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001919
Robin Murphy7602b872016-04-28 17:12:09 +01001920 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
1921 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
1922 if (!(id & ID0_PTFS_NO_AARCH32S))
1923 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
1924 }
1925
Will Deacon45ae7cf2013-06-24 18:31:25 +01001926 /* ID1 */
1927 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01001928 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001929
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01001930 /* Check for size mismatch of SMMU address space from mapped region */
Will Deacon518f7132014-11-14 17:17:54 +00001931 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
Robin Murphy452107c2017-03-30 17:56:30 +01001932 size <<= smmu->pgshift;
1933 if (smmu->cb_base != gr0_base + size)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001934 dev_warn(smmu->dev,
Robin Murphy452107c2017-03-30 17:56:30 +01001935 "SMMU address space size (0x%lx) differs from mapped region size (0x%tx)!\n",
1936 size * 2, (smmu->cb_base - gr0_base) * 2);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001937
Will Deacon518f7132014-11-14 17:17:54 +00001938 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001939 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
1940 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
1941 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
1942 return -ENODEV;
1943 }
1944 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
1945 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphye086d912016-04-13 18:12:58 +01001946 /*
1947 * Cavium CN88xx erratum #27704.
1948 * Ensure ASID and VMID allocation is unique across all SMMUs in
1949 * the system.
1950 */
1951 if (smmu->model == CAVIUM_SMMUV2) {
1952 smmu->cavium_id_base =
1953 atomic_add_return(smmu->num_context_banks,
1954 &cavium_smmu_context_count);
1955 smmu->cavium_id_base -= smmu->num_context_banks;
Robert Richter53c35dce2017-03-13 11:39:01 +01001956 dev_notice(smmu->dev, "\tenabling workaround for Cavium erratum 27704\n");
Robin Murphye086d912016-04-13 18:12:58 +01001957 }
Robin Murphy90df3732017-08-08 14:56:14 +01001958 smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks,
1959 sizeof(*smmu->cbs), GFP_KERNEL);
1960 if (!smmu->cbs)
1961 return -ENOMEM;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001962
1963 /* ID2 */
1964 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
1965 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00001966 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001967
Will Deacon518f7132014-11-14 17:17:54 +00001968 /* The output mask is also applied for bypass */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001969 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00001970 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001971
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001972 if (id & ID2_VMID16)
1973 smmu->features |= ARM_SMMU_FEAT_VMID16;
1974
Robin Murphyf1d84542015-03-04 16:41:05 +00001975 /*
1976 * What the page table walker can address actually depends on which
1977 * descriptor format is in use, but since a) we don't know that yet,
1978 * and b) it can vary per context bank, this will have to do...
1979 */
1980 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
1981 dev_warn(smmu->dev,
1982 "failed to set DMA mask for table walker\n");
1983
Robin Murphyb7862e32016-04-13 18:13:03 +01001984 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00001985 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01001986 if (smmu->version == ARM_SMMU_V1_64K)
1987 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001988 } else {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001989 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
Will Deacon518f7132014-11-14 17:17:54 +00001990 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00001991 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01001992 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00001993 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01001994 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00001995 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01001996 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001997 }
1998
Robin Murphy7602b872016-04-28 17:12:09 +01001999 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01002000 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01002001 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01002002 if (smmu->features &
2003 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01002004 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01002005 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01002006 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01002007 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01002008 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01002009
Robin Murphyd5466352016-05-09 17:20:09 +01002010 if (arm_smmu_ops.pgsize_bitmap == -1UL)
2011 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
2012 else
2013 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
2014 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
2015 smmu->pgsize_bitmap);
2016
Will Deacon28d60072014-09-01 16:24:48 +01002018 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
2019 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
Will Deacon518f7132014-11-14 17:17:54 +00002020 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01002021
2022 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
2023 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
Will Deacon518f7132014-11-14 17:17:54 +00002024 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01002025
Will Deacon45ae7cf2013-06-24 18:31:25 +01002026 return 0;
2027}
2028
Robin Murphy67b65a32016-04-13 18:12:57 +01002029struct arm_smmu_match_data {
2030 enum arm_smmu_arch_version version;
2031 enum arm_smmu_implementation model;
2032};
2033
2034#define ARM_SMMU_MATCH_DATA(name, ver, imp) \
Sricharan R96a299d2018-12-04 11:52:09 +05302035static const struct arm_smmu_match_data name = { .version = ver, .model = imp }
Robin Murphy67b65a32016-04-13 18:12:57 +01002036
2037ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
2038ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
Robin Murphyb7862e32016-04-13 18:13:03 +01002039ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01002040ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
Robin Murphye086d912016-04-13 18:12:58 +01002041ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
Vivek Gautam89cddc52018-12-04 11:52:13 +05302042ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2);
Robin Murphy67b65a32016-04-13 18:12:57 +01002043
Joerg Roedel09b52692014-10-02 12:24:45 +02002044static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01002045 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
2046 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
2047 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01002048 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01002049 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01002050 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Vivek Gautam89cddc52018-12-04 11:52:13 +05302051 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
Robin Murphy09360402014-08-28 17:51:59 +01002052 { },
2053};
Robin Murphy09360402014-08-28 17:51:59 +01002054
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002055#ifdef CONFIG_ACPI
2056static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
2057{
2058 int ret = 0;
2059
2060 switch (model) {
2061 case ACPI_IORT_SMMU_V1:
2062 case ACPI_IORT_SMMU_CORELINK_MMU400:
2063 smmu->version = ARM_SMMU_V1;
2064 smmu->model = GENERIC_SMMU;
2065 break;
Robin Murphy84c24372017-06-19 16:41:56 +01002066 case ACPI_IORT_SMMU_CORELINK_MMU401:
2067 smmu->version = ARM_SMMU_V1_64K;
2068 smmu->model = GENERIC_SMMU;
2069 break;
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002070 case ACPI_IORT_SMMU_V2:
2071 smmu->version = ARM_SMMU_V2;
2072 smmu->model = GENERIC_SMMU;
2073 break;
2074 case ACPI_IORT_SMMU_CORELINK_MMU500:
2075 smmu->version = ARM_SMMU_V2;
2076 smmu->model = ARM_MMU500;
2077 break;
Robin Murphy84c24372017-06-19 16:41:56 +01002078 case ACPI_IORT_SMMU_CAVIUM_THUNDERX:
2079 smmu->version = ARM_SMMU_V2;
2080 smmu->model = CAVIUM_SMMUV2;
2081 break;
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002082 default:
2083 ret = -ENODEV;
2084 }
2085
2086 return ret;
2087}
2088
2089static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
2090 struct arm_smmu_device *smmu)
2091{
2092 struct device *dev = smmu->dev;
2093 struct acpi_iort_node *node =
2094 *(struct acpi_iort_node **)dev_get_platdata(dev);
2095 struct acpi_iort_smmu *iort_smmu;
2096 int ret;
2097
2098 /* Retrieve SMMU1/2 specific data */
2099 iort_smmu = (struct acpi_iort_smmu *)node->node_data;
2100
2101 ret = acpi_smmu_get_data(iort_smmu->model, smmu);
2102 if (ret < 0)
2103 return ret;
2104
2105 /* Ignore the configuration access interrupt */
2106 smmu->num_global_irqs = 1;
2107
2108 if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
2109 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
2110
2111 return 0;
2112}
2113#else
2114static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
2115 struct arm_smmu_device *smmu)
2116{
2117 return -ENODEV;
2118}
2119#endif
2120
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002121static int arm_smmu_device_dt_probe(struct platform_device *pdev,
2122 struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002123{
Robin Murphy67b65a32016-04-13 18:12:57 +01002124 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002125 struct device *dev = &pdev->dev;
Robin Murphy021bb842016-09-14 15:26:46 +01002126 bool legacy_binding;
2127
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002128 if (of_property_read_u32(dev->of_node, "#global-interrupts",
2129 &smmu->num_global_irqs)) {
2130 dev_err(dev, "missing #global-interrupts property\n");
2131 return -ENODEV;
2132 }
2133
2134 data = of_device_get_match_data(dev);
2135 smmu->version = data->version;
2136 smmu->model = data->model;
2137
2138 parse_driver_options(smmu);
2139
Robin Murphy021bb842016-09-14 15:26:46 +01002140 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
2141 if (legacy_binding && !using_generic_binding) {
2142 if (!using_legacy_binding)
2143 pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
2144 using_legacy_binding = true;
2145 } else if (!legacy_binding && !using_legacy_binding) {
2146 using_generic_binding = true;
2147 } else {
2148 dev_err(dev, "not probing due to mismatched DT properties\n");
2149 return -ENODEV;
2150 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002151
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002152 if (of_dma_is_coherent(dev->of_node))
2153 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
2154
2155 return 0;
2156}
2157
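/*
 * Install arm_smmu_ops on every bus type we might serve (platform, AMBA,
 * PCI, fsl-mc), but only where no other IOMMU driver has already claimed
 * the bus.
 */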
Robin Murphyf6810c12017-04-10 16:51:05 +05302158static void arm_smmu_bus_init(void)
2159{
2160 /* Oh, for a proper bus abstraction */
2161 if (!iommu_present(&platform_bus_type))
2162 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
2163#ifdef CONFIG_ARM_AMBA
2164 if (!iommu_present(&amba_bustype))
2165 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
2166#endif
2167#ifdef CONFIG_PCI
2168 if (!iommu_present(&pci_bus_type)) {
2169 pci_request_acs();
2170 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
2171 }
2172#endif
Nipun Guptaeab03e22018-09-10 19:19:18 +05302173#ifdef CONFIG_FSL_MC_BUS
2174 if (!iommu_present(&fsl_mc_bus_type))
2175 bus_set_iommu(&fsl_mc_bus_type, &arm_smmu_ops);
2176#endif
Robin Murphyf6810c12017-04-10 16:51:05 +05302177}
2178
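/*
 * Probe order: parse DT or ACPI/IORT data, map the register space (global
 * space in the lower half, context banks in the upper), gather global and
 * context IRQs plus clocks, read the ID registers, wire up the global
 * fault handlers, register with the IOMMU core, reset the hardware and,
 * for non-legacy bindings, publish the bus ops straight away.
 */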
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002179static int arm_smmu_device_probe(struct platform_device *pdev)
2180{
2181 struct resource *res;
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002182 resource_size_t ioaddr;
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002183 struct arm_smmu_device *smmu;
2184 struct device *dev = &pdev->dev;
2185 int num_irqs, i, err;
2186
Will Deacon45ae7cf2013-06-24 18:31:25 +01002187 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
2188 if (!smmu) {
2189 dev_err(dev, "failed to allocate arm_smmu_device\n");
2190 return -ENOMEM;
2191 }
2192 smmu->dev = dev;
2193
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002194 if (dev->of_node)
2195 err = arm_smmu_device_dt_probe(pdev, smmu);
2196 else
2197 err = arm_smmu_device_acpi_probe(pdev, smmu);
2198
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002199 if (err)
2200 return err;
Robin Murphy09360402014-08-28 17:51:59 +01002201
Will Deacon45ae7cf2013-06-24 18:31:25 +01002202 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002203 ioaddr = res->start;
Julia Lawall8a7f4312013-08-19 12:20:37 +01002204 smmu->base = devm_ioremap_resource(dev, res);
2205 if (IS_ERR(smmu->base))
2206 return PTR_ERR(smmu->base);
Robin Murphy452107c2017-03-30 17:56:30 +01002207 smmu->cb_base = smmu->base + resource_size(res) / 2;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002208
Will Deacon45ae7cf2013-06-24 18:31:25 +01002209 num_irqs = 0;
2210 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
2211 num_irqs++;
2212 if (num_irqs > smmu->num_global_irqs)
2213 smmu->num_context_irqs++;
2214 }
2215
Andreas Herrmann44a08de2013-10-01 13:39:07 +01002216 if (!smmu->num_context_irqs) {
2217 dev_err(dev, "found %d interrupts but expected at least %d\n",
2218 num_irqs, smmu->num_global_irqs + 1);
2219 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002220 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002221
Kees Cooka86854d2018-06-12 14:07:58 -07002222 smmu->irqs = devm_kcalloc(dev, num_irqs, sizeof(*smmu->irqs),
Will Deacon45ae7cf2013-06-24 18:31:25 +01002223 GFP_KERNEL);
2224 if (!smmu->irqs) {
2225 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
2226 return -ENOMEM;
2227 }
2228
2229 for (i = 0; i < num_irqs; ++i) {
2230 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07002231
Will Deacon45ae7cf2013-06-24 18:31:25 +01002232 if (irq < 0) {
2233 dev_err(dev, "failed to get irq index %d\n", i);
2234 return -ENODEV;
2235 }
2236 smmu->irqs[i] = irq;
2237 }
2238
Sricharan R96a299d2018-12-04 11:52:09 +05302239 err = devm_clk_bulk_get_all(dev, &smmu->clks);
2240 if (err < 0) {
2241 dev_err(dev, "failed to get clocks %d\n", err);
2242 return err;
2243 }
2244 smmu->num_clks = err;
2245
2246 err = clk_bulk_prepare_enable(smmu->num_clks, smmu->clks);
2247 if (err)
2248 return err;
2249
Olav Haugan3c8766d2014-08-22 17:12:32 -07002250 err = arm_smmu_device_cfg_probe(smmu);
2251 if (err)
2252 return err;
2253
Vivek Gautamd1e20222018-07-19 23:23:56 +05302254 if (smmu->version == ARM_SMMU_V2) {
2255 if (smmu->num_context_banks > smmu->num_context_irqs) {
2256 dev_err(dev,
2257 "found only %d context irq(s) but %d required\n",
2258 smmu->num_context_irqs, smmu->num_context_banks);
2259 return -ENODEV;
2260 }
2261
2262 /* Ignore superfluous interrupts */
2263 smmu->num_context_irqs = smmu->num_context_banks;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002264 }
2265
Will Deacon45ae7cf2013-06-24 18:31:25 +01002266 for (i = 0; i < smmu->num_global_irqs; ++i) {
Peng Fanbee14002016-07-04 17:38:22 +08002267 err = devm_request_irq(smmu->dev, smmu->irqs[i],
2268 arm_smmu_global_fault,
2269 IRQF_SHARED,
2270 "arm-smmu global fault",
2271 smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002272 if (err) {
2273 dev_err(dev, "failed to request global IRQ %d (%u)\n",
2274 i, smmu->irqs[i]);
Robin Murphyf80cd882016-09-14 15:21:39 +01002275 return err;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002276 }
2277 }
2278
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002279 err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
2280 "smmu.%pa", &ioaddr);
2281 if (err) {
2282 dev_err(dev, "Failed to register iommu in sysfs\n");
2283 return err;
2284 }
2285
2286 iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
2287 iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
2288
2289 err = iommu_device_register(&smmu->iommu);
2290 if (err) {
2291 dev_err(dev, "Failed to register iommu\n");
2292 return err;
2293 }
2294
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002295 platform_set_drvdata(pdev, smmu);
Will Deaconfd90cec2013-08-21 13:56:34 +01002296 arm_smmu_device_reset(smmu);
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03002297 arm_smmu_test_smr_masks(smmu);
Robin Murphy021bb842016-09-14 15:26:46 +01002298
Robin Murphyf6810c12017-04-10 16:51:05 +05302299 /*
Sricharan Rd4a44f02018-12-04 11:52:10 +05302300 * We want to avoid touching dev->power.lock in fastpaths unless
2301 * it's really going to do something useful - pm_runtime_enabled()
2302 * can serve as an ideal proxy for that decision. So, conditionally
2303 * enable pm_runtime.
2304 */
2305 if (dev->pm_domain) {
2306 pm_runtime_set_active(dev);
2307 pm_runtime_enable(dev);
2308 }
2309
2310 /*
Robin Murphyf6810c12017-04-10 16:51:05 +05302311 * For ACPI and generic DT bindings, an SMMU will be probed before
2312 * any device which might need it, so we want the bus ops in place
2313 * ready to handle default domain setup as soon as any SMMU exists.
2314 */
2315 if (!using_legacy_binding)
2316 arm_smmu_bus_init();
2317
Will Deacon45ae7cf2013-06-24 18:31:25 +01002318 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002319}
2320
Robin Murphyf6810c12017-04-10 16:51:05 +05302321/*
2322 * With the legacy DT binding in play, though, we have no guarantees about
2323 * probe order, but then we're also not doing default domains, so we can
2324 * delay setting bus ops until we're sure every possible SMMU is ready,
2325 * and that way ensure that no add_device() calls get missed.
2326 */
2327static int arm_smmu_legacy_bus_init(void)
2328{
2329 if (using_legacy_binding)
2330 arm_smmu_bus_init();
2331 return 0;
2332}
2333device_initcall_sync(arm_smmu_legacy_bus_init);
2334
Paul Gortmakeraddb672f2018-12-01 14:19:16 -05002335static void arm_smmu_device_shutdown(struct platform_device *pdev)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002336{
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002337 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002338
2339 if (!smmu)
Paul Gortmakeraddb672f2018-12-01 14:19:16 -05002340 return;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002341
Will Deaconecfadb62013-07-31 19:21:28 +01002342 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002343 dev_err(&pdev->dev, "removing device with active domains!\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01002344
Sricharan Rd4a44f02018-12-04 11:52:10 +05302345 arm_smmu_rpm_get(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002346 /* Turn the thing off */
Mitchel Humpherys29073202014-07-08 09:52:18 -07002347 writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Sricharan Rd4a44f02018-12-04 11:52:10 +05302348 arm_smmu_rpm_put(smmu);
Sricharan R96a299d2018-12-04 11:52:09 +05302349
Sricharan Rd4a44f02018-12-04 11:52:10 +05302350 if (pm_runtime_enabled(smmu->dev))
2351 pm_runtime_force_suspend(smmu->dev);
2352 else
2353 clk_bulk_disable(smmu->num_clks, smmu->clks);
2354
2355 clk_bulk_unprepare(smmu->num_clks, smmu->clks);
Nate Watterson7aa86192017-06-29 18:18:15 -04002356}
2357
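/*
 * Runtime PM: resume re-enables the bulk clocks and replays the full
 * device reset (context bank and stream mapping state lives in the
 * software shadow, so it can simply be rewritten); suspend just gates the
 * clocks. The system sleep hooks defer to these unless the device is
 * already runtime-suspended.
 */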
Sricharan R96a299d2018-12-04 11:52:09 +05302358static int __maybe_unused arm_smmu_runtime_resume(struct device *dev)
Robin Murphya2d866f2017-08-08 14:56:15 +01002359{
2360 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
Sricharan R96a299d2018-12-04 11:52:09 +05302361 int ret;
2362
2363 ret = clk_bulk_enable(smmu->num_clks, smmu->clks);
2364 if (ret)
2365 return ret;
Robin Murphya2d866f2017-08-08 14:56:15 +01002366
2367 arm_smmu_device_reset(smmu);
Sricharan R96a299d2018-12-04 11:52:09 +05302368
Will Deacon45ae7cf2013-06-24 18:31:25 +01002369 return 0;
2370}
2371
Sricharan R96a299d2018-12-04 11:52:09 +05302372static int __maybe_unused arm_smmu_runtime_suspend(struct device *dev)
Dan Carpenter6614ee72013-08-21 09:34:20 +01002373{
Sricharan R96a299d2018-12-04 11:52:09 +05302374 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
2375
2376 clk_bulk_disable(smmu->num_clks, smmu->clks);
2377
2378 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002379}
2380
Robin Murphya2d866f2017-08-08 14:56:15 +01002381static int __maybe_unused arm_smmu_pm_resume(struct device *dev)
2382{
Sricharan R96a299d2018-12-04 11:52:09 +05302383 if (pm_runtime_suspended(dev))
2384 return 0;
Robin Murphya2d866f2017-08-08 14:56:15 +01002385
Sricharan R96a299d2018-12-04 11:52:09 +05302386 return arm_smmu_runtime_resume(dev);
Robin Murphya2d866f2017-08-08 14:56:15 +01002387}
2388
Sricharan R96a299d2018-12-04 11:52:09 +05302389static int __maybe_unused arm_smmu_pm_suspend(struct device *dev)
2390{
2391 if (pm_runtime_suspended(dev))
2392 return 0;
2393
2394 return arm_smmu_runtime_suspend(dev);
2395}
2396
2397static const struct dev_pm_ops arm_smmu_pm_ops = {
2398 SET_SYSTEM_SLEEP_PM_OPS(arm_smmu_pm_suspend, arm_smmu_pm_resume)
2399 SET_RUNTIME_PM_OPS(arm_smmu_runtime_suspend,
2400 arm_smmu_runtime_resume, NULL)
2401};
Robin Murphya2d866f2017-08-08 14:56:15 +01002402
Will Deacon45ae7cf2013-06-24 18:31:25 +01002403static struct platform_driver arm_smmu_driver = {
2404 .driver = {
Paul Gortmakeraddb672f2018-12-01 14:19:16 -05002405 .name = "arm-smmu",
2406 .of_match_table = of_match_ptr(arm_smmu_of_match),
2407 .pm = &arm_smmu_pm_ops,
2408 .suppress_bind_attrs = true,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002409 },
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002410 .probe = arm_smmu_device_probe,
Nate Watterson7aa86192017-06-29 18:18:15 -04002411 .shutdown = arm_smmu_device_shutdown,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002412};
Paul Gortmakeraddb672f2018-12-01 14:19:16 -05002413builtin_platform_driver(arm_smmu_driver);