/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 *	- Extended Stream ID (16 bit)
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>
#include <linux/fsl/mc.h>

#include "io-pgtable.h"
#include "arm-smmu-regs.h"

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)
#define ARM_MMU500_ACR_S2CRB_TLBEN	(1 << 10)
#define ARM_MMU500_ACR_SMTNMB_TLBEN	(1 << 8)

#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */
#define TLB_SPIN_COUNT			10

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif

/* Translation context bank */
#define ARM_SMMU_CB(smmu, n)	((smmu)->cb_base + ((n) << (smmu)->pgshift))

#define MSI_IOVA_BASE			0x8000000
#define MSI_IOVA_LENGTH			0x100000

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
};

struct arm_smmu_s2cr {
	struct iommu_group		*group;
	int				count;
	enum arm_smmu_s2cr_type		type;
	enum arm_smmu_s2cr_privcfg	privcfg;
	u8				cbndx;
};

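/*
 * Reset value for unused stream-map entries: fault if the user asked for
 * bypass to be disabled, otherwise let the traffic through untranslated.
 */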
#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
}

struct arm_smmu_smr {
	u16				mask;
	u16				id;
	bool				valid;
};

struct arm_smmu_cb {
	u64				ttbr[2];
	u32				tcr[2];
	u32				mair[2];
	struct arm_smmu_cfg		*cfg;
};

struct arm_smmu_master_cfg {
	struct arm_smmu_device		*smmu;
	s16				smendx[];
};
#define INVALID_SMENDX			-1
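/*
 * Per-master state lives in dev->iommu_fwspec: __fwspec_cfg() recovers our
 * arm_smmu_master_cfg, and for_each_cfg_sme() walks the stream-map-entry
 * index for each of the master's stream IDs. fwspec_smendx() bounds-checks
 * i itself, so the loop condition can evaluate idx via the comma operator.
 */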
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
#define fwspec_smendx(fw, i) \
	(i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
#define for_each_cfg_sme(fw, i, idx) \
	for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	void __iomem			*cb_base;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
#define ARM_SMMU_FEAT_EXIDS		(1 << 12)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	struct arm_smmu_cb		*cbs;
	atomic_t			irptndx;

	u32				num_mapping_groups;
	u16				streamid_mask;
	u16				smr_mask_mask;
	struct arm_smmu_smr		*smrs;
	struct arm_smmu_s2cr		*s2crs;
	struct mutex			stream_map_mutex;

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;
	struct clk_bulk_data		*clks;
	int				num_clks;

	u32				cavium_id_base; /* Specific to Cavium */

	spinlock_t			global_sync_lock;

	/* IOMMU core code handle */
	struct iommu_device		iommu;
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	union {
		u16			asid;
		u16			vmid;
	};
	u32				cbar;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
	ARM_SMMU_DOMAIN_BYPASS,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	const struct iommu_gather_ops	*tlb_ops;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	bool				non_strict;
	struct mutex			init_mutex; /* Protects smmu pointer */
	spinlock_t			cb_lock; /* Serialises ATS1* ops and TLB syncs */
	struct iommu_domain		domain;
};

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static bool using_legacy_binding, using_generic_binding;

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}

static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", 0)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}

static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;

static int arm_smmu_register_legacy_master(struct device *dev,
					   struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err;

	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}

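/* Racy-but-safe bitmap allocator: retry until test_and_set_bit() wins. */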
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
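/* Spin briefly, then back off exponentially up to roughly one second. */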
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
				void __iomem *sync, void __iomem *status)
{
	unsigned int spin_cnt, delay;

	writel_relaxed(0, sync);
	for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
		for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
			if (!(readl_relaxed(status) & sTLBGSTATUS_GSACTIVE))
				return;
			cpu_relax();
		}
		udelay(delay);
	}
	dev_err_ratelimited(smmu->dev,
			    "TLB sync timed out -- SMMU may be deadlocked\n");
}

static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
{
	void __iomem *base = ARM_SMMU_GR0(smmu);
	unsigned long flags;

	spin_lock_irqsave(&smmu->global_sync_lock, flags);
	__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_GR0_sTLBGSYNC,
			    base + ARM_SMMU_GR0_sTLBGSTATUS);
	spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
}

static void arm_smmu_tlb_sync_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
	unsigned long flags;

	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
	__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_CB_TLBSYNC,
			    base + ARM_SMMU_CB_TLBSTATUS);
	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
}

443{
444 struct arm_smmu_domain *smmu_domain = cookie;
445
446 arm_smmu_tlb_sync_global(smmu_domain->smmu);
447}
448
449static void arm_smmu_tlb_inv_context_s1(void *cookie)
Will Deacon518f7132014-11-14 17:17:54 +0000450{
451 struct arm_smmu_domain *smmu_domain = cookie;
Will Deacon44680ee2014-06-25 11:29:12 +0100452 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Robin Murphy11febfc2017-03-30 17:56:31 +0100453 void __iomem *base = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);
454
Robin Murphy44f68762018-09-20 17:10:27 +0100455 /*
456 * NOTE: this is not a relaxed write; it needs to guarantee that PTEs
457 * cleared by the current CPU are visible to the SMMU before the TLBI.
458 */
459 writel(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
Robin Murphy11febfc2017-03-30 17:56:31 +0100460 arm_smmu_tlb_sync_context(cookie);
461}
462
463static void arm_smmu_tlb_inv_context_s2(void *cookie)
464{
465 struct arm_smmu_domain *smmu_domain = cookie;
Will Deacon44680ee2014-06-25 11:29:12 +0100466 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy11febfc2017-03-30 17:56:31 +0100467 void __iomem *base = ARM_SMMU_GR0(smmu);
Will Deacon1463fe42013-07-31 19:21:27 +0100468
Robin Murphy44f68762018-09-20 17:10:27 +0100469 /* NOTE: see above */
470 writel(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
Robin Murphy11febfc2017-03-30 17:56:31 +0100471 arm_smmu_tlb_sync_global(smmu);
Will Deacon1463fe42013-07-31 19:21:27 +0100472}
473
Will Deacon518f7132014-11-14 17:17:54 +0000474static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
Robin Murphy06c610e2015-12-07 18:18:53 +0000475 size_t granule, bool leaf, void *cookie)
Will Deacon518f7132014-11-14 17:17:54 +0000476{
477 struct arm_smmu_domain *smmu_domain = cookie;
478 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Will Deacon518f7132014-11-14 17:17:54 +0000479 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
Robin Murphy11febfc2017-03-30 17:56:31 +0100480 void __iomem *reg = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);
Will Deacon518f7132014-11-14 17:17:54 +0000481
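	/*
	 * With a coherent page-table walker the PTEs were written with plain
	 * stores, so make them visible before issuing the relaxed TLBI below.
	 */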
	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	if (stage1) {
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			iova &= ~12UL;
			iova |= cfg->asid;
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)cfg->asid << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else {
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	}
}

/*
 * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
 * almost negligible, but the benefit of getting the first one in as far ahead
 * of the sync as possible is significant, hence we don't just make this a
 * no-op and set .tlb_sync to arm_smmu_inv_context_s2() as you might think.
 */
static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
					 size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	void __iomem *base = ARM_SMMU_GR0(smmu_domain->smmu);

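	/* As above: order PTE writes ahead of the TLBI for coherent walks. */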
	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
}

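/*
 * Three TLB maintenance flavours: stage 1 by ASID, stage 2 by IPA with a
 * per-context sync (SMMUv2), and stage 2 by VMID with a global sync (SMMUv1).
 */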
static const struct iommu_gather_ops arm_smmu_s1_tlb_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s1,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync_context,
};

static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v2 = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync_context,
};

static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v1 = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
	.tlb_add_flush	= arm_smmu_tlb_inv_vmid_nosync,
	.tlb_sync	= arm_smmu_tlb_sync_vmid,
};

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	u32 fsr, fsynr;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);

	dev_err_ratelimited(smmu->dev,
	"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n",
			    fsr, iova, fsynr, cfg->cbndx);

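	/* FSR fault bits are write-one-to-clear: writing fsr back retires them. */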
	writel(fsr, cb_base + ARM_SMMU_CB_FSR);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

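/*
 * Stash the page-table configuration in the software-managed arm_smmu_cb;
 * arm_smmu_write_context_bank() pushes it to the hardware registers, and can
 * do so again later (e.g. after reset) without help from io-pgtable.
 */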
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	cb->cfg = cfg;

	/* TTBCR */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr;
		} else {
			cb->tcr[0] = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
			cb->tcr[1] = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			cb->tcr[1] |= TTBCR2_SEP_UPSTREAM;
			if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
				cb->tcr[1] |= TTBCR2_AS;
		}
	} else {
		cb->tcr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	}

	/* TTBRs */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
			cb->ttbr[1] = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
		} else {
			cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
			cb->ttbr[0] |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
			cb->ttbr[1] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
			cb->ttbr[1] |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
		}
	} else {
		cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr;
			cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr;
		} else {
			cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
			cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		}
	}
}

static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
{
	u32 reg;
	bool stage1;
	struct arm_smmu_cb *cb = &smmu->cbs[idx];
	struct arm_smmu_cfg *cfg = cb->cfg;
	void __iomem *cb_base, *gr1_base;

	cb_base = ARM_SMMU_CB(smmu, idx);

	/* Unassigned context banks only need disabling */
	if (!cfg) {
		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
		return;
	}

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	/* CBA2R */
	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_RW64_64BIT;
		else
			reg = CBA2R_RW64_32BIT;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= cfg->vmid << CBA2R_VMID_SHIFT;

		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(idx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version < ARM_SMMU_V2)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= cfg->vmid << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(idx));

	/*
	 * TTBCR
	 * We must write this before the TTBRs, since it determines the
	 * access behaviour of some fields (in particular, ASID[15:8]).
	 */
	if (stage1 && smmu->version > ARM_SMMU_V1)
		writel_relaxed(cb->tcr[1], cb_base + ARM_SMMU_CB_TTBCR2);
	writel_relaxed(cb->tcr[0], cb_base + ARM_SMMU_CB_TTBCR);

	/* TTBRs */
	if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
		writel_relaxed(cfg->asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
		writel_relaxed(cb->ttbr[0], cb_base + ARM_SMMU_CB_TTBR0);
		writel_relaxed(cb->ttbr[1], cb_base + ARM_SMMU_CB_TTBR1);
	} else {
		writeq_relaxed(cb->ttbr[0], cb_base + ARM_SMMU_CB_TTBR0);
		if (stage1)
			writeq_relaxed(cb->ttbr[1], cb_base + ARM_SMMU_CB_TTBR1);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		writel_relaxed(cb->mair[0], cb_base + ARM_SMMU_CB_S1_MAIR0);
		writel_relaxed(cb->mair[1], cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= SCTLR_E;

	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
		smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N              S1
	 *     S1             S1+S2            S1
	 *     S1               S2             S2
	 *     S1               S1             S1
	 *     N                N              N
	 *     N              S1+S2            S2
	 *     N                S2             S2
	 *     N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
	    !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
	    (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		} else {
			fmt = ARM_V7S;
			ias = min(ias, 32UL);
			oas = min(oas, 32UL);
		}
		smmu_domain->tlb_ops = &arm_smmu_s1_tlb_ops;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		if (smmu->version == ARM_SMMU_V2)
			smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v2;
		else
			smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v1;
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}
	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (ret < 0)
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

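	/*
	 * Stage-2 VMIDs start at 1 so they never collide with VMID 0, which
	 * stage-1 contexts leave in CBAR.VMID. cavium_id_base keeps ASIDs and
	 * VMIDs unique across instances sharing a TLB (ThunderX erratum 27704).
	 */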
	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
		cfg->vmid = cfg->cbndx + 1 + smmu->cavium_id_base;
	else
		cfg->asid = cfg->cbndx + smmu->cavium_id_base;

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= smmu_domain->tlb_ops,
		.iommu_dev	= smmu->dev,
	};

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA;

	if (smmu_domain->non_strict)
		pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1UL << ias) - 1;
	domain->geometry.force_aperture = true;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
			       IRQF_SHARED, "arm-smmu-context-fault", domain);
	if (ret < 0) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	int irq;

	if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	smmu->cbs[cfg->cbndx].cfg = NULL;
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED &&
	    type != IOMMU_DOMAIN_DMA &&
	    type != IOMMU_DOMAIN_IDENTITY)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
	    iommu_get_dma_cookie(&smmu_domain->domain))) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->cb_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_smr *smr = smmu->smrs + idx;
	u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;

	if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
		reg |= SMR_VALID;
	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
}

static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
	u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
		  (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
		  (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;

	if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
	    smmu->smrs[idx].valid)
		reg |= S2CR_EXIDVALID;
	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
}

static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
{
	arm_smmu_write_s2cr(smmu, idx);
	if (smmu->smrs)
		arm_smmu_write_smr(smmu, idx);
}

/*
 * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
 * should be called after sCR0 is written.
 */
static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 smr;

	if (!smmu->smrs)
		return;

	/*
	 * SMR.ID bits may not be preserved if the corresponding MASK
	 * bits are set, so check each one separately. We can reject
	 * masters later if they try to claim IDs outside these masks.
	 */
	smr = smmu->streamid_mask << SMR_ID_SHIFT;
	writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
	smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
	smmu->streamid_mask = smr >> SMR_ID_SHIFT;

	smr = smmu->streamid_mask << SMR_MASK_SHIFT;
	writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
	smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
	smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
}

static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
{
	struct arm_smmu_smr *smrs = smmu->smrs;
	int i, free_idx = -ENOSPC;

	/* Stream indexing is blissfully easy */
	if (!smrs)
		return id;

	/* Validating SMRs is... less so */
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		if (!smrs[i].valid) {
			/*
			 * Note the first free entry we come across, which
			 * we'll claim in the end if nothing else matches.
			 */
			if (free_idx < 0)
				free_idx = i;
			continue;
		}
		/*
		 * If the new entry is _entirely_ matched by an existing entry,
		 * then reuse that, with the guarantee that there also cannot
		 * be any subsequent conflicting entries. In normal use we'd
		 * expect simply identical entries for this case, but there's
		 * no harm in accommodating the generalisation.
		 */
		if ((mask & smrs[i].mask) == mask &&
		    !((id ^ smrs[i].id) & ~smrs[i].mask))
			return i;
		/*
		 * If the new entry has any other overlap with an existing one,
		 * though, then there always exists at least one stream ID
		 * which would cause a conflict, and we can't allow that risk.
		 */
		if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
			return -EINVAL;
	}

	return free_idx;
}

static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
{
	if (--smmu->s2crs[idx].count)
		return false;

	smmu->s2crs[idx] = s2cr_init_val;
	if (smmu->smrs)
		smmu->smrs[idx].valid = false;

	return true;
}

static int arm_smmu_master_alloc_smes(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
	struct arm_smmu_device *smmu = cfg->smmu;
	struct arm_smmu_smr *smrs = smmu->smrs;
	struct iommu_group *group;
	int i, idx, ret;

	mutex_lock(&smmu->stream_map_mutex);
	/* Figure out a viable stream map entry allocation */
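	/* Each fwspec ID packs the SMR mask in its upper 16 bits. */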
Robin Murphyadfec2e2016-09-12 17:13:55 +01001118 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy021bb842016-09-14 15:26:46 +01001119 u16 sid = fwspec->ids[i];
1120 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
1121
Robin Murphy588888a2016-09-12 17:13:54 +01001122 if (idx != INVALID_SMENDX) {
1123 ret = -EEXIST;
1124 goto out_err;
1125 }
1126
Robin Murphy021bb842016-09-14 15:26:46 +01001127 ret = arm_smmu_find_sme(smmu, sid, mask);
Robin Murphy588888a2016-09-12 17:13:54 +01001128 if (ret < 0)
1129 goto out_err;
1130
1131 idx = ret;
1132 if (smrs && smmu->s2crs[idx].count == 0) {
Robin Murphy021bb842016-09-14 15:26:46 +01001133 smrs[idx].id = sid;
1134 smrs[idx].mask = mask;
Robin Murphy588888a2016-09-12 17:13:54 +01001135 smrs[idx].valid = true;
1136 }
1137 smmu->s2crs[idx].count++;
1138 cfg->smendx[i] = (s16)idx;
1139 }
1140
1141 group = iommu_group_get_for_dev(dev);
1142 if (!group)
1143 group = ERR_PTR(-ENOMEM);
1144 if (IS_ERR(group)) {
1145 ret = PTR_ERR(group);
1146 goto out_err;
1147 }
1148 iommu_group_put(group);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001149
Will Deacon45ae7cf2013-06-24 18:31:25 +01001150 /* It worked! Now, poke the actual hardware */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001151 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy588888a2016-09-12 17:13:54 +01001152 arm_smmu_write_sme(smmu, idx);
1153 smmu->s2crs[idx].group = group;
1154 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001155
Robin Murphy588888a2016-09-12 17:13:54 +01001156 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001157 return 0;
1158
Robin Murphy588888a2016-09-12 17:13:54 +01001159out_err:
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001160 while (i--) {
Robin Murphy588888a2016-09-12 17:13:54 +01001161 arm_smmu_free_sme(smmu, cfg->smendx[i]);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001162 cfg->smendx[i] = INVALID_SMENDX;
1163 }
Robin Murphy588888a2016-09-12 17:13:54 +01001164 mutex_unlock(&smmu->stream_map_mutex);
1165 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001166}

static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
{
	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
	int i, idx;

	mutex_lock(&smmu->stream_map_mutex);
	for_each_cfg_sme(fwspec, i, idx) {
		if (arm_smmu_free_sme(smmu, idx))
			arm_smmu_write_sme(smmu, idx);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
}

static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct iommu_fwspec *fwspec)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s2cr *s2cr = smmu->s2crs;
	u8 cbndx = smmu_domain->cfg.cbndx;
	enum arm_smmu_s2cr_type type;
	int i, idx;

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
		type = S2CR_TYPE_BYPASS;
	else
		type = S2CR_TYPE_TRANS;

	for_each_cfg_sme(fwspec, i, idx) {
		if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
			continue;

		s2cr[idx].type = type;
		s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
		s2cr[idx].cbndx = cbndx;
		arm_smmu_write_s2cr(smmu, idx);
	}
	return 0;
}

static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret;
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_device *smmu;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (!fwspec || fwspec->ops != &arm_smmu_ops) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/*
	 * FIXME: The arch/arm DMA API code tries to attach devices to its own
	 * domains between of_xlate() and add_device() - we have no way to cope
	 * with that, so until ARM gets converted to rely on groups and default
	 * domains, just say no (but more politely than by dereferencing NULL).
	 * This should be at least a WARN_ON once that's sorted.
	 */
	if (!fwspec->iommu_priv)
		return -ENODEV;

	smmu = fwspec_smmu(fwspec);
	/* Ensure that the domain is finalised */
	ret = arm_smmu_init_domain_context(domain, smmu);
	if (ret < 0)
		return ret;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different SMMUs.
	 */
	if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
		return -EINVAL;
	}

	/* Looks ok, so add the device to the domain */
	return arm_smmu_domain_add_master(smmu_domain, fwspec);
}

static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	return ops->map(ops, iova, paddr, size, prot);
}

static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size)
{
	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;

	if (!ops)
		return 0;

	return ops->unmap(ops, iova, size);
}
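
/*
 * Illustrative consumer flow (a sketch, not part of this driver): callers
 * reach the callbacks above through the generic IOMMU API, e.g.
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&platform_bus_type);
 *
 *	if (dom && !iommu_attach_device(dom, dev))
 *		iommu_map(dom, iova, paddr, SZ_4K,
 *			  IOMMU_READ | IOMMU_WRITE);
 *
 * iommu_attach_device() lands in arm_smmu_attach_dev() and iommu_map() in
 * arm_smmu_map() via the arm_smmu_ops table defined further down.
 */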

static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (smmu_domain->tlb_ops)
		smmu_domain->tlb_ops->tlb_flush_all(smmu_domain);
}

static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (smmu_domain->tlb_ops)
		smmu_domain->tlb_ops->tlb_sync(smmu_domain);
}

static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
	struct device *dev = smmu->dev;
	void __iomem *cb_base;
	u32 tmp;
	u64 phys;
	unsigned long va, flags;

	cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);

	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
	/* ATS1 registers can only be written atomically */
	va = iova & ~0xfffUL;
	if (smmu->version == ARM_SMMU_V2)
		smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
	else /* Register is only 32-bit in v1 */
		writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);

	if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
				      !(tmp & ATSR_ACTIVE), 5, 50)) {
		spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
		dev_err(dev,
			"iova to phys timed out on %pad. Falling back to software table walk.\n",
			&iova);
		return ops->iova_to_phys(ops, iova);
	}

	phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
	if (phys & CB_PAR_F) {
		dev_err(dev, "translation fault!\n");
		dev_err(dev, "PAR = 0x%llx\n", phys);
		return 0;
	}

	return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
}
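
/*
 * The hardware walk above follows the SMMUv2 ATOS sequence: write the
 * page-aligned VA to ATS1PR, poll ATSR.ACTIVE until the translation
 * completes, then read the result from PAR. As a concrete example,
 * translating iova 0x40001234 issues an ATS1PR write of 0x40001000 and,
 * on success, returns (PAR & GENMASK_ULL(39, 12)) | 0x234.
 */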

static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (domain->type == IOMMU_DOMAIN_IDENTITY)
		return iova;

	if (!ops)
		return 0;

	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
			smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
		return arm_smmu_iova_to_phys_hard(domain, iova);

	return ops->iova_to_phys(ops, iova);
}

static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
		return true;
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static int arm_smmu_match_node(struct device *dev, void *data)
{
	return dev->fwnode == data;
}

static
struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
{
	struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
						fwnode, arm_smmu_match_node);
	put_device(dev);
	return dev ? dev_get_drvdata(dev) : NULL;
}

static int arm_smmu_add_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_cfg *cfg;
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	int i, ret;

	if (using_legacy_binding) {
		ret = arm_smmu_register_legacy_master(dev, &smmu);

		/*
		 * If dev->iommu_fwspec is initially NULL,
		 * arm_smmu_register_legacy_master() will allocate/initialise
		 * a new one. Thus we need to update fwspec for later use.
		 */
		fwspec = dev->iommu_fwspec;
		if (ret)
			goto out_free;
	} else if (fwspec && fwspec->ops == &arm_smmu_ops) {
		smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
	} else {
		return -ENODEV;
	}

	ret = -EINVAL;
	for (i = 0; i < fwspec->num_ids; i++) {
		u16 sid = fwspec->ids[i];
		u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;

		if (sid & ~smmu->streamid_mask) {
			dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
				sid, smmu->streamid_mask);
			goto out_free;
		}
		if (mask & ~smmu->smr_mask_mask) {
			dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
				mask, smmu->smr_mask_mask);
			goto out_free;
		}
	}

	ret = -ENOMEM;
	cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
		      GFP_KERNEL);
	if (!cfg)
		goto out_free;

	cfg->smmu = smmu;
	fwspec->iommu_priv = cfg;
	while (i--)
		cfg->smendx[i] = INVALID_SMENDX;

	ret = arm_smmu_master_alloc_smes(dev);
	if (ret)
		goto out_cfg_free;

	iommu_device_link(&smmu->iommu, dev);

	return 0;

out_cfg_free:
	kfree(cfg);
out_free:
	iommu_fwspec_free(dev);
	return ret;
}
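
/*
 * Note on the allocation in arm_smmu_add_device(): smendx[] is a trailing
 * array in struct arm_smmu_master_cfg, so
 * offsetof(struct arm_smmu_master_cfg, smendx[i]) sizes the kzalloc() to
 * exactly the i stream IDs validated by the loop; with two IDs, for
 * instance, only smendx[0] and smendx[1] exist.
 */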

static void arm_smmu_remove_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_master_cfg *cfg;
	struct arm_smmu_device *smmu;

	if (!fwspec || fwspec->ops != &arm_smmu_ops)
		return;

	cfg = fwspec->iommu_priv;
	smmu = cfg->smmu;

	iommu_device_unlink(&smmu->iommu, dev);
	arm_smmu_master_free_smes(fwspec);
	iommu_group_remove_device(dev);
	kfree(fwspec->iommu_priv);
	iommu_fwspec_free(dev);
}

static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
	struct iommu_group *group = NULL;
	int i, idx;

	for_each_cfg_sme(fwspec, i, idx) {
		if (group && smmu->s2crs[idx].group &&
		    group != smmu->s2crs[idx].group)
			return ERR_PTR(-EINVAL);

		group = smmu->s2crs[idx].group;
	}

	if (group)
		return iommu_group_ref_get(group);

	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else if (dev_is_fsl_mc(dev))
		group = fsl_mc_device_group(dev);
	else
		group = generic_device_group(dev);

	return group;
}
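
/*
 * Example of the sharing logic above: two masters whose StreamIDs were
 * claimed by the same SME index the same s2crs[] entry, so the loop
 * returns the iommu_group recorded there by the first caller and both
 * devices join a single group; conflicting group pointers across one
 * device's SMEs are rejected with -EINVAL.
 */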

static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	switch (domain->type) {
	case IOMMU_DOMAIN_UNMANAGED:
		switch (attr) {
		case DOMAIN_ATTR_NESTING:
			*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
			return 0;
		default:
			return -ENODEV;
		}
		break;
	case IOMMU_DOMAIN_DMA:
		switch (attr) {
		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
			*(int *)data = smmu_domain->non_strict;
			return 0;
		default:
			return -ENODEV;
		}
		break;
	default:
		return -EINVAL;
	}
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	mutex_lock(&smmu_domain->init_mutex);

	switch (domain->type) {
	case IOMMU_DOMAIN_UNMANAGED:
		switch (attr) {
		case DOMAIN_ATTR_NESTING:
			if (smmu_domain->smmu) {
				ret = -EPERM;
				goto out_unlock;
			}

			if (*(int *)data)
				smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
			else
				smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
			break;
		default:
			ret = -ENODEV;
		}
		break;
	case IOMMU_DOMAIN_DMA:
		switch (attr) {
		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
			smmu_domain->non_strict = *(int *)data;
			break;
		default:
			ret = -ENODEV;
		}
		break;
	default:
		ret = -EINVAL;
	}
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}
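
/*
 * Illustrative caller sequence (a sketch; not the only possible user):
 * nesting must be requested before the domain is first attached,
 * otherwise the -EPERM branch above fires:
 *
 *	int nesting = 1;
 *
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_NESTING, &nesting);
 *	iommu_attach_device(domain, dev);
 */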

static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	u32 mask, fwid = 0;

	if (args->args_count > 0)
		fwid |= (u16)args->args[0];

	if (args->args_count > 1)
		fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
	else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
		fwid |= (u16)mask << SMR_MASK_SHIFT;

	return iommu_fwspec_add_ids(dev, &fwid, 1);
}
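
/*
 * Illustrative generic-binding DT fragment (node names and values are
 * examples only):
 *
 *	smmu: iommu@2b400000 {
 *		compatible = "arm,mmu-500";
 *		#iommu-cells = <1>;
 *	};
 *
 *	master {
 *		iommus = <&smmu 0x400>;
 *	};
 *
 * The 0x400 cell arrives here as args->args[0]; a second cell, or a
 * "stream-match-mask" property on the SMMU node, supplies the SMR mask
 * packed into the upper half of the fwid.
 */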

static void arm_smmu_get_resv_regions(struct device *dev,
				      struct list_head *head)
{
	struct iommu_resv_region *region;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

	region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
					 prot, IOMMU_RESV_SW_MSI);
	if (!region)
		return;

	list_add_tail(&region->list, head);

	iommu_dma_get_resv_regions(dev, head);
}

static void arm_smmu_put_resv_regions(struct device *dev,
				      struct list_head *head)
{
	struct iommu_resv_region *entry, *next;

	list_for_each_entry_safe(entry, next, head, list)
		kfree(entry);
}
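
/*
 * What the above reserves, assuming the MSI_IOVA_* constants defined
 * earlier in this file: a fixed software-MSI window is reported to the
 * core so that the DMA layer steers MSI doorbell mappings into
 * [MSI_IOVA_BASE, MSI_IOVA_BASE + MSI_IOVA_LENGTH) rather than handing
 * that range out for ordinary DMA.
 */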

static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.flush_iotlb_all	= arm_smmu_flush_iotlb_all,
	.iotlb_sync		= arm_smmu_iotlb_sync,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= arm_smmu_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.of_xlate		= arm_smmu_of_xlate,
	.get_resv_regions	= arm_smmu_get_resv_regions,
	.put_resv_regions	= arm_smmu_put_resv_regions,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};

static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	int i;
	u32 reg, major;

	/* clear global FSR */
	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);

	/*
	 * Reset stream mapping groups: Initial values mark all SMRn as
	 * invalid and all S2CRn as bypass unless overridden.
	 */
	for (i = 0; i < smmu->num_mapping_groups; ++i)
		arm_smmu_write_sme(smmu, i);

	if (smmu->model == ARM_MMU500) {
		/*
		 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
		 * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
		 * bit is only present in MMU-500r2 onwards.
		 */
		reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
		major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
		reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
		if (major >= 2)
			reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
		/*
		 * Allow unmatched Stream IDs to allocate bypass
		 * TLB entries for reduced latency.
		 */
		reg |= ARM_MMU500_ACR_SMTNMB_TLBEN | ARM_MMU500_ACR_S2CRB_TLBEN;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
	}

	/* Make sure all context banks are disabled and clear CB_FSR */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		void __iomem *cb_base = ARM_SMMU_CB(smmu, i);

		arm_smmu_write_context_bank(smmu, i);
		writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
		/*
		 * Disable MMU-500's not-particularly-beneficial next-page
		 * prefetcher for the sake of errata #841119 and #826419.
		 */
		if (smmu->model == ARM_MMU500) {
			reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
			reg &= ~ARM_MMU500_ACTLR_CPRE;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
		}
	}

	/* Invalidate the TLB, just in case */
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);

	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);

	/* Enable fault reporting */
	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);

	/* Disable TLB broadcasting. */
	reg |= (sCR0_VMIDPNE | sCR0_PTM);

	/* Enable client access, handling unmatched streams as appropriate */
	reg &= ~sCR0_CLIENTPD;
	if (disable_bypass)
		reg |= sCR0_USFCFG;
	else
		reg &= ~sCR0_USFCFG;

	/* Disable forced broadcasting */
	reg &= ~sCR0_FB;

	/* Don't upgrade barriers */
	reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);

	if (smmu->features & ARM_SMMU_FEAT_VMID16)
		reg |= sCR0_VMID16EN;

	if (smmu->features & ARM_SMMU_FEAT_EXIDS)
		reg |= sCR0_EXIDENABLE;

	/* Push the button */
	arm_smmu_tlb_sync_global(smmu);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}
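
/*
 * Net effect of the reset sequence above, as a summary rather than extra
 * programming: every SME and context bank is quiesced, the TLBs are
 * invalidated, and sCR0 is rewritten in one shot with fault reporting on,
 * TLB broadcast and barrier upgrades off, and unmatched streams either
 * faulted or bypassed according to the disable_bypass parameter.
 */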

static int arm_smmu_id_size_to_bits(int size)
{
	switch (size) {
	case 0:
		return 32;
	case 1:
		return 36;
	case 2:
		return 40;
	case 3:
		return 42;
	case 4:
		return 44;
	case 5:
	default:
		return 48;
	}
}
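
/*
 * Example decode: an ID2 register with IAS == 2 and OAS == 3 yields a
 * 40-bit IPA size and a 42-bit PA size via the mapping above.
 */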

static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned long size;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 id;
	bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
	int i;

	dev_notice(smmu->dev, "probing hardware configuration...\n");
	dev_notice(smmu->dev, "SMMUv%d with:\n",
		   smmu->version == ARM_SMMU_V2 ? 2 : 1);

	/* ID0 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);

	/* Restrict available stages based on module parameter */
	if (force_stage == 1)
		id &= ~(ID0_S2TS | ID0_NTS);
	else if (force_stage == 2)
		id &= ~(ID0_S1TS | ID0_NTS);

	if (id & ID0_S1TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
		dev_notice(smmu->dev, "\tstage 1 translation\n");
	}

	if (id & ID0_S2TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
		dev_notice(smmu->dev, "\tstage 2 translation\n");
	}

	if (id & ID0_NTS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
		dev_notice(smmu->dev, "\tnested translation\n");
	}

	if (!(smmu->features &
	      (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
		dev_err(smmu->dev, "\tno translation support!\n");
		return -ENODEV;
	}

	if ((id & ID0_S1TS) &&
	    ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
		dev_notice(smmu->dev, "\taddress translation ops\n");
	}

	/*
	 * In order for DMA API calls to work properly, we must defer to what
	 * the FW says about coherency, regardless of what the hardware claims.
	 * Fortunately, this also opens up a workaround for systems where the
	 * ID register value has ended up configured incorrectly.
	 */
	cttw_reg = !!(id & ID0_CTTW);
	if (cttw_fw || cttw_reg)
		dev_notice(smmu->dev, "\t%scoherent table walk\n",
			   cttw_fw ? "" : "non-");
	if (cttw_fw != cttw_reg)
		dev_notice(smmu->dev,
			   "\t(IDR0.CTTW overridden by FW configuration)\n");

	/* Max. number of entries we have for stream matching/indexing */
	if (smmu->version == ARM_SMMU_V2 && id & ID0_EXIDS) {
		smmu->features |= ARM_SMMU_FEAT_EXIDS;
		size = 1 << 16;
	} else {
		size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
	}
	smmu->streamid_mask = size - 1;
	if (id & ID0_SMS) {
		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
		size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
		if (size == 0) {
			dev_err(smmu->dev,
				"stream-matching supported, but no SMRs present!\n");
			return -ENODEV;
		}

		/* Zero-initialised to mark as invalid */
		smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
					  GFP_KERNEL);
		if (!smmu->smrs)
			return -ENOMEM;

		dev_notice(smmu->dev,
			   "\tstream matching with %lu register groups\n", size);
	}
	/* s2cr->type == 0 means translation, so initialise explicitly */
	smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
					 GFP_KERNEL);
	if (!smmu->s2crs)
		return -ENOMEM;
	for (i = 0; i < size; i++)
		smmu->s2crs[i] = s2cr_init_val;

	smmu->num_mapping_groups = size;
	mutex_init(&smmu->stream_map_mutex);
	spin_lock_init(&smmu->global_sync_lock);

	if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
		smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
		if (!(id & ID0_PTFS_NO_AARCH32S))
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
	}

	/* ID1 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
	smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;

	/* Check for size mismatch of SMMU address space from mapped region */
	size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
	size <<= smmu->pgshift;
	if (smmu->cb_base != gr0_base + size)
		dev_warn(smmu->dev,
			 "SMMU address space size (0x%lx) differs from mapped region size (0x%tx)!\n",
			 size * 2, (smmu->cb_base - gr0_base) * 2);
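
	/*
	 * Worked example of the check above (illustrative values): with
	 * 64K pages (pgshift = 16) and ID1.NUMPAGENDXB = 1, size becomes
	 * 1 << 2 << 16 = 256K, so the region backing this SMMU is
	 * expected to span 512K with the context banks in the upper half.
	 */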

	smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
	smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
		return -ENODEV;
	}
	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
		   smmu->num_context_banks, smmu->num_s2_context_banks);
	/*
	 * Cavium CN88xx erratum #27704.
	 * Ensure ASID and VMID allocation is unique across all SMMUs in
	 * the system.
	 */
	if (smmu->model == CAVIUM_SMMUV2) {
		smmu->cavium_id_base =
			atomic_add_return(smmu->num_context_banks,
					  &cavium_smmu_context_count);
		smmu->cavium_id_base -= smmu->num_context_banks;
		dev_notice(smmu->dev, "\tenabling workaround for Cavium erratum 27704\n");
	}
	smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks,
				 sizeof(*smmu->cbs), GFP_KERNEL);
	if (!smmu->cbs)
		return -ENOMEM;

	/* ID2 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
	size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
	smmu->ipa_size = size;

	/* The output mask is also applied for bypass */
	size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
	smmu->pa_size = size;

	if (id & ID2_VMID16)
		smmu->features |= ARM_SMMU_FEAT_VMID16;

	/*
	 * What the page table walker can address actually depends on which
	 * descriptor format is in use, but since a) we don't know that yet,
	 * and b) it can vary per context bank, this will have to do...
	 */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	if (smmu->version < ARM_SMMU_V2) {
		smmu->va_size = smmu->ipa_size;
		if (smmu->version == ARM_SMMU_V1_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	} else {
		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
		smmu->va_size = arm_smmu_id_size_to_bits(size);
		if (id & ID2_PTFS_4K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
		if (id & ID2_PTFS_16K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
		if (id & ID2_PTFS_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	}

	/* Now we've corralled the various formats, what'll it do? */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
		smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
	if (smmu->features &
	    (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;

	if (arm_smmu_ops.pgsize_bitmap == -1UL)
		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
	else
		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
		   smmu->pgsize_bitmap);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
			   smmu->va_size, smmu->ipa_size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
		dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
			   smmu->ipa_size, smmu->pa_size);

	return 0;
}

struct arm_smmu_match_data {
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;
};

#define ARM_SMMU_MATCH_DATA(name, ver, imp)	\
static const struct arm_smmu_match_data name = { .version = ver, .model = imp }

ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
	{ .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
	{ .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
	{ .compatible = "arm,mmu-401", .data = &arm_mmu401 },
	{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
	{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);

#ifdef CONFIG_ACPI
static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
{
	int ret = 0;

	switch (model) {
	case ACPI_IORT_SMMU_V1:
	case ACPI_IORT_SMMU_CORELINK_MMU400:
		smmu->version = ARM_SMMU_V1;
		smmu->model = GENERIC_SMMU;
		break;
	case ACPI_IORT_SMMU_CORELINK_MMU401:
		smmu->version = ARM_SMMU_V1_64K;
		smmu->model = GENERIC_SMMU;
		break;
	case ACPI_IORT_SMMU_V2:
		smmu->version = ARM_SMMU_V2;
		smmu->model = GENERIC_SMMU;
		break;
	case ACPI_IORT_SMMU_CORELINK_MMU500:
		smmu->version = ARM_SMMU_V2;
		smmu->model = ARM_MMU500;
		break;
	case ACPI_IORT_SMMU_CAVIUM_THUNDERX:
		smmu->version = ARM_SMMU_V2;
		smmu->model = CAVIUM_SMMUV2;
		break;
	default:
		ret = -ENODEV;
	}

	return ret;
}

static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
				      struct arm_smmu_device *smmu)
{
	struct device *dev = smmu->dev;
	struct acpi_iort_node *node =
		*(struct acpi_iort_node **)dev_get_platdata(dev);
	struct acpi_iort_smmu *iort_smmu;
	int ret;

	/* Retrieve SMMU1/2 specific data */
	iort_smmu = (struct acpi_iort_smmu *)node->node_data;

	ret = acpi_smmu_get_data(iort_smmu->model, smmu);
	if (ret < 0)
		return ret;

	/* Ignore the configuration access interrupt */
	smmu->num_global_irqs = 1;

	if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;

	return 0;
}
#else
static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
					     struct arm_smmu_device *smmu)
{
	return -ENODEV;
}
#endif

static int arm_smmu_device_dt_probe(struct platform_device *pdev,
				    struct arm_smmu_device *smmu)
{
	const struct arm_smmu_match_data *data;
	struct device *dev = &pdev->dev;
	bool legacy_binding;

	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
		dev_err(dev, "missing #global-interrupts property\n");
		return -ENODEV;
	}

	data = of_device_get_match_data(dev);
	smmu->version = data->version;
	smmu->model = data->model;

	parse_driver_options(smmu);

	legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
	if (legacy_binding && !using_generic_binding) {
		if (!using_legacy_binding)
			pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
		using_legacy_binding = true;
	} else if (!legacy_binding && !using_legacy_binding) {
		using_generic_binding = true;
	} else {
		dev_err(dev, "not probing due to mismatched DT properties\n");
		return -ENODEV;
	}

	if (of_dma_is_coherent(dev->of_node))
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;

	return 0;
}

static void arm_smmu_bus_init(void)
{
	/* Oh, for a proper bus abstraction */
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
#ifdef CONFIG_ARM_AMBA
	if (!iommu_present(&amba_bustype))
		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
#endif
#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type)) {
		pci_request_acs();
		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
	}
#endif
#ifdef CONFIG_FSL_MC_BUS
	if (!iommu_present(&fsl_mc_bus_type))
		bus_set_iommu(&fsl_mc_bus_type, &arm_smmu_ops);
#endif
}
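
/*
 * A note on the effect of the above: once bus_set_iommu() has run for a
 * given bus type, devices subsequently added on that bus have
 * arm_smmu_add_device() invoked for them by the IOMMU core, which is what
 * lets default DMA domains be set up without any device-driver
 * involvement.
 */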

static int arm_smmu_device_probe(struct platform_device *pdev)
{
	struct resource *res;
	resource_size_t ioaddr;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	int num_irqs, i, err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	if (dev->of_node)
		err = arm_smmu_device_dt_probe(pdev, smmu);
	else
		err = arm_smmu_device_acpi_probe(pdev, smmu);

	if (err)
		return err;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ioaddr = res->start;
	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);
	smmu->cb_base = smmu->base + resource_size(res) / 2;

	num_irqs = 0;
	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
		num_irqs++;
		if (num_irqs > smmu->num_global_irqs)
			smmu->num_context_irqs++;
	}

	if (!smmu->num_context_irqs) {
		dev_err(dev, "found %d interrupts but expected at least %d\n",
			num_irqs, smmu->num_global_irqs + 1);
		return -ENODEV;
	}

	smmu->irqs = devm_kcalloc(dev, num_irqs, sizeof(*smmu->irqs),
				  GFP_KERNEL);
	if (!smmu->irqs) {
		dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
		return -ENOMEM;
	}

	for (i = 0; i < num_irqs; ++i) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0) {
			dev_err(dev, "failed to get irq index %d\n", i);
			return -ENODEV;
		}
		smmu->irqs[i] = irq;
	}

	err = devm_clk_bulk_get_all(dev, &smmu->clks);
	if (err < 0) {
		dev_err(dev, "failed to get clocks %d\n", err);
		return err;
	}
	smmu->num_clks = err;

	err = clk_bulk_prepare_enable(smmu->num_clks, smmu->clks);
	if (err)
		return err;

	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		return err;

	if (smmu->version == ARM_SMMU_V2) {
		if (smmu->num_context_banks > smmu->num_context_irqs) {
			dev_err(dev,
				"found only %d context irq(s) but %d required\n",
				smmu->num_context_irqs, smmu->num_context_banks);
			return -ENODEV;
		}

		/* Ignore superfluous interrupts */
		smmu->num_context_irqs = smmu->num_context_banks;
	}

	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = devm_request_irq(smmu->dev, smmu->irqs[i],
				       arm_smmu_global_fault,
				       IRQF_SHARED,
				       "arm-smmu global fault",
				       smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			return err;
		}
	}

	err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
				     "smmu.%pa", &ioaddr);
	if (err) {
		dev_err(dev, "Failed to register iommu in sysfs\n");
		return err;
	}

	iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
	iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);

	err = iommu_device_register(&smmu->iommu);
	if (err) {
		dev_err(dev, "Failed to register iommu\n");
		return err;
	}

	platform_set_drvdata(pdev, smmu);
	arm_smmu_device_reset(smmu);
	arm_smmu_test_smr_masks(smmu);

	/*
	 * For ACPI and generic DT bindings, an SMMU will be probed before
	 * any device which might need it, so we want the bus ops in place
	 * ready to handle default domain setup as soon as any SMMU exists.
	 */
	if (!using_legacy_binding)
		arm_smmu_bus_init();

	return 0;
}

/*
 * With the legacy DT binding in play, though, we have no guarantees about
 * probe order, but then we're also not doing default domains, so we can
 * delay setting bus ops until we're sure every possible SMMU is ready,
 * and that way ensure that no add_device() calls get missed.
 */
static int arm_smmu_legacy_bus_init(void)
{
	if (using_legacy_binding)
		arm_smmu_bus_init();
	return 0;
}
device_initcall_sync(arm_smmu_legacy_bus_init);

static int arm_smmu_device_remove(struct platform_device *pdev)
{
	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);

	if (!smmu)
		return -ENODEV;

	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_err(&pdev->dev, "removing device with active domains!\n");

	/* Turn the thing off */
	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);

	clk_bulk_disable_unprepare(smmu->num_clks, smmu->clks);

	return 0;
}

static void arm_smmu_device_shutdown(struct platform_device *pdev)
{
	arm_smmu_device_remove(pdev);
}

static int __maybe_unused arm_smmu_runtime_resume(struct device *dev)
{
	struct arm_smmu_device *smmu = dev_get_drvdata(dev);
	int ret;

	ret = clk_bulk_enable(smmu->num_clks, smmu->clks);
	if (ret)
		return ret;

	arm_smmu_device_reset(smmu);

	return 0;
}

static int __maybe_unused arm_smmu_runtime_suspend(struct device *dev)
{
	struct arm_smmu_device *smmu = dev_get_drvdata(dev);

	clk_bulk_disable(smmu->num_clks, smmu->clks);

	return 0;
}

static int __maybe_unused arm_smmu_pm_resume(struct device *dev)
{
	if (pm_runtime_suspended(dev))
		return 0;

	return arm_smmu_runtime_resume(dev);
}

static int __maybe_unused arm_smmu_pm_suspend(struct device *dev)
{
	if (pm_runtime_suspended(dev))
		return 0;

	return arm_smmu_runtime_suspend(dev);
}

static const struct dev_pm_ops arm_smmu_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(arm_smmu_pm_suspend, arm_smmu_pm_resume)
	SET_RUNTIME_PM_OPS(arm_smmu_runtime_suspend,
			   arm_smmu_runtime_resume, NULL)
};
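
/*
 * Illustrative lifetime, assuming a platform that supplies SMMU clocks:
 * register access elsewhere in the driver is expected to be bracketed by
 * pm_runtime_get_sync()/pm_runtime_put(), which funnel into the runtime
 * handlers above so the bulk clocks are only enabled while needed; system
 * sleep reuses the same pair unless the device is already runtime
 * suspended.
 */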

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
		.pm		= &arm_smmu_pm_ops,
	},
	.probe	= arm_smmu_device_probe,
	.remove	= arm_smmu_device_remove,
	.shutdown = arm_smmu_device_shutdown,
};
module_platform_driver(arm_smmu_driver);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");