/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 *	- Extended Stream ID (16 bit)
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>
#include <linux/fsl/mc.h>

#include "io-pgtable.h"
#include "arm-smmu-regs.h"

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)
#define ARM_MMU500_ACR_S2CRB_TLBEN	(1 << 10)
#define ARM_MMU500_ACR_SMTNMB_TLBEN	(1 << 8)

#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */
#define TLB_SPIN_COUNT			10

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif

/* Translation context bank */
#define ARM_SMMU_CB(smmu, n)	((smmu)->cb_base + ((n) << (smmu)->pgshift))

#define MSI_IOVA_BASE			0x8000000
#define MSI_IOVA_LENGTH			0x100000

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
};

struct arm_smmu_s2cr {
	struct iommu_group		*group;
	int				count;
	enum arm_smmu_s2cr_type		type;
	enum arm_smmu_s2cr_privcfg	privcfg;
	u8				cbndx;
};

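/*
 * The value a stream-to-context register falls back to when its last user
 * goes away: fault the stream if "disable_bypass" is set, otherwise let it
 * bypass translation.
 */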
#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
}

struct arm_smmu_smr {
	u16				mask;
	u16				id;
	bool				valid;
};

struct arm_smmu_cb {
	u64				ttbr[2];
	u32				tcr[2];
	u32				mair[2];
	struct arm_smmu_cfg		*cfg;
};

struct arm_smmu_master_cfg {
	struct arm_smmu_device		*smmu;
	s16				smendx[];
};
#define INVALID_SMENDX			-1
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
#define fwspec_smendx(fw, i) \
	(i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
#define for_each_cfg_sme(fw, i, idx) \
	for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)
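/*
 * A sketch of typical usage (illustrative only, with "fwspec", "i" and
 * "idx" being the caller's locals):
 *
 *	for_each_cfg_sme(fwspec, i, idx) {
 *		if (idx == INVALID_SMENDX)
 *			continue;
 *		... program or inspect the SMR/S2CR pair at "idx" ...
 *	}
 *
 * Note that "idx" is (re)assigned before the loop condition is tested, so
 * it is valid in every iteration, and out-of-range indices read back as
 * INVALID_SMENDX rather than indexing smendx[] out of bounds.
 */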

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	void __iomem			*cb_base;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
#define ARM_SMMU_FEAT_EXIDS		(1 << 12)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	struct arm_smmu_cb		*cbs;
	atomic_t			irptndx;

	u32				num_mapping_groups;
	u16				streamid_mask;
	u16				smr_mask_mask;
	struct arm_smmu_smr		*smrs;
	struct arm_smmu_s2cr		*s2crs;
	struct mutex			stream_map_mutex;

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;
	struct clk_bulk_data		*clks;
	int				num_clks;

	u32				cavium_id_base; /* Specific to Cavium */

	spinlock_t			global_sync_lock;

	/* IOMMU core code handle */
	struct iommu_device		iommu;
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	union {
		u16			asid;
		u16			vmid;
	};
	u32				cbar;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
	ARM_SMMU_DOMAIN_BYPASS,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	const struct iommu_gather_ops	*tlb_ops;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	bool				non_strict;
	struct mutex			init_mutex; /* Protects smmu pointer */
	spinlock_t			cb_lock; /* Serialises ATS1* ops and TLB syncs */
	struct iommu_domain		domain;
};

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static bool using_legacy_binding, using_generic_binding;

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};

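/*
 * Runtime PM helpers: the SMMU's registers can only be touched while it is
 * powered up, so get/put a runtime PM reference around register access;
 * both collapse to no-ops when runtime PM is not enabled for the device.
 */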
static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
{
	if (pm_runtime_enabled(smmu->dev))
		return pm_runtime_get_sync(smmu->dev);

	return 0;
}

static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
{
	if (pm_runtime_enabled(smmu->dev))
		pm_runtime_put(smmu->dev);
}

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

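/*
 * Find the OF node to match against the legacy "mmu-masters" binding: for
 * a PCI device this is the node of its host controller, since that binding
 * assumes Stream ID == Requester ID (see arm_smmu_register_legacy_master()).
 */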
static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}

static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", 0)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}

static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;

static int arm_smmu_register_legacy_master(struct device *dev,
					   struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err;

	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}

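/*
 * Lock-free context bank allocator: if another CPU claims the bit between
 * our find_next_zero_bit() and test_and_set_bit(), just search again.
 */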
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
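/*
 * Strategy: spin for a short while (TLB_SPIN_COUNT polls) in the hope that
 * the sync completes quickly, then back off with exponentially growing
 * udelay()s; the doubling delays sum to roughly TLB_LOOP_TIMEOUT
 * microseconds (about a second) before we give up and cry deadlock.
 */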
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
				void __iomem *sync, void __iomem *status)
{
	unsigned int spin_cnt, delay;

	writel_relaxed(0, sync);
	for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
		for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
			if (!(readl_relaxed(status) & sTLBGSTATUS_GSACTIVE))
				return;
			cpu_relax();
		}
		udelay(delay);
	}
	dev_err_ratelimited(smmu->dev,
			    "TLB sync timed out -- SMMU may be deadlocked\n");
}

static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
{
	void __iomem *base = ARM_SMMU_GR0(smmu);
	unsigned long flags;

	spin_lock_irqsave(&smmu->global_sync_lock, flags);
	__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_GR0_sTLBGSYNC,
			    base + ARM_SMMU_GR0_sTLBGSTATUS);
	spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
}

static void arm_smmu_tlb_sync_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
	unsigned long flags;

	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
	__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_CB_TLBSYNC,
			    base + ARM_SMMU_CB_TLBSTATUS);
	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
}

static void arm_smmu_tlb_sync_vmid(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;

	arm_smmu_tlb_sync_global(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context_s1(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *base = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);

	/*
	 * NOTE: this is not a relaxed write; it needs to guarantee that PTEs
	 * cleared by the current CPU are visible to the SMMU before the TLBI.
	 */
	writel(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
	arm_smmu_tlb_sync_context(cookie);
}

static void arm_smmu_tlb_inv_context_s2(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *base = ARM_SMMU_GR0(smmu);

	/* NOTE: see above */
	writel(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
	arm_smmu_tlb_sync_global(smmu);
}

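/*
 * The writes below encode the address as the TLBI registers expect it:
 * 32-bit TLBIVA(L) takes VA[31:12] with the ASID folded into the low bits,
 * the AArch64 variant takes VA >> 12 with the ASID in bits [63:48], and
 * the stage-2 TLBIIPAS2(L) registers take IPA >> 12.
 */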
static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);

	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	if (stage1) {
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			iova &= ~12UL;
			iova |= cfg->asid;
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)cfg->asid << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else {
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	}
}

/*
 * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
 * almost negligible, but the benefit of getting the first one in as far ahead
 * of the sync as possible is significant, hence we don't just make this a
 * no-op and set .tlb_sync to arm_smmu_tlb_inv_context_s2() as you might think.
 */
static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
					 size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	void __iomem *base = ARM_SMMU_GR0(smmu_domain->smmu);

	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
}

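/*
 * Three sets of TLB maintenance ops: stage-1 contexts, and stage-2 contexts
 * on SMMUv2, can invalidate by VA/IPA and sync on their own context bank,
 * whereas stage 2 on SMMUv1 has to fall back to TLBIVMID with a global sync
 * (see the comment above arm_smmu_tlb_inv_vmid_nosync()).
 */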
static const struct iommu_gather_ops arm_smmu_s1_tlb_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s1,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync_context,
};

static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v2 = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync_context,
};

static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v1 = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
	.tlb_add_flush	= arm_smmu_tlb_inv_vmid_nosync,
	.tlb_sync	= arm_smmu_tlb_sync_vmid,
};

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	u32 fsr, fsynr;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);

	dev_err_ratelimited(smmu->dev,
	"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n",
			    fsr, iova, fsynr, cfg->cbndx);

	writel(fsr, cb_base + ARM_SMMU_CB_FSR);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	cb->cfg = cfg;

	/* TTBCR */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr;
		} else {
			cb->tcr[0] = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
			cb->tcr[1] = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			cb->tcr[1] |= TTBCR2_SEP_UPSTREAM;
			if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
				cb->tcr[1] |= TTBCR2_AS;
		}
	} else {
		cb->tcr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	}

	/* TTBRs */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
			cb->ttbr[1] = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
		} else {
			cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
			cb->ttbr[0] |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
			cb->ttbr[1] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
			cb->ttbr[1] |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
		}
	} else {
		cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr;
			cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr;
		} else {
			cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
			cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		}
	}
}

static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
{
	u32 reg;
	bool stage1;
	struct arm_smmu_cb *cb = &smmu->cbs[idx];
	struct arm_smmu_cfg *cfg = cb->cfg;
	void __iomem *cb_base, *gr1_base;

	cb_base = ARM_SMMU_CB(smmu, idx);

	/* Unassigned context banks only need disabling */
	if (!cfg) {
		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
		return;
	}

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	/* CBA2R */
	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_RW64_64BIT;
		else
			reg = CBA2R_RW64_32BIT;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= cfg->vmid << CBA2R_VMID_SHIFT;

		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(idx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version < ARM_SMMU_V2)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= cfg->vmid << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(idx));

	/*
	 * TTBCR
	 * We must write this before the TTBRs, since it determines the
	 * access behaviour of some fields (in particular, ASID[15:8]).
	 */
	if (stage1 && smmu->version > ARM_SMMU_V1)
		writel_relaxed(cb->tcr[1], cb_base + ARM_SMMU_CB_TTBCR2);
	writel_relaxed(cb->tcr[0], cb_base + ARM_SMMU_CB_TTBCR);

	/* TTBRs */
	if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
		writel_relaxed(cfg->asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
		writel_relaxed(cb->ttbr[0], cb_base + ARM_SMMU_CB_TTBR0);
		writel_relaxed(cb->ttbr[1], cb_base + ARM_SMMU_CB_TTBR1);
	} else {
		writeq_relaxed(cb->ttbr[0], cb_base + ARM_SMMU_CB_TTBR0);
		if (stage1)
			writeq_relaxed(cb->ttbr[1], cb_base + ARM_SMMU_CB_TTBR1);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		writel_relaxed(cb->mair[0], cb_base + ARM_SMMU_CB_S1_MAIR0);
		writel_relaxed(cb->mair[1], cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= SCTLR_E;

	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
		smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N              S1
	 *     S1             S1+S2            S1
	 *     S1               S2             S2
	 *     S1               S1             S1
	 *     N                N              N
	 *     N              S1+S2            S2
	 *     N                S2             S2
	 *     N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
	    !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
	    (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		} else {
			fmt = ARM_V7S;
			ias = min(ias, 32UL);
			oas = min(oas, 32UL);
		}
		smmu_domain->tlb_ops = &arm_smmu_s1_tlb_ops;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		if (smmu->version == ARM_SMMU_V2)
			smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v2;
		else
			smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v1;
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}
	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (ret < 0)
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
		cfg->vmid = cfg->cbndx + 1 + smmu->cavium_id_base;
	else
		cfg->asid = cfg->cbndx + smmu->cavium_id_base;

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= smmu_domain->tlb_ops,
		.iommu_dev	= smmu->dev,
	};

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA;

	if (smmu_domain->non_strict)
		pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1UL << ias) - 1;
	domain->geometry.force_aperture = true;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
			       IRQF_SHARED, "arm-smmu-context-fault", domain);
	if (ret < 0) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	int ret, irq;

	if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
		return;

	ret = arm_smmu_rpm_get(smmu);
	if (ret < 0)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	smmu->cbs[cfg->cbndx].cfg = NULL;
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);

	arm_smmu_rpm_put(smmu);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED &&
	    type != IOMMU_DOMAIN_DMA &&
	    type != IOMMU_DOMAIN_IDENTITY)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
	    iommu_get_dma_cookie(&smmu_domain->domain))) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->cb_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_smr *smr = smmu->smrs + idx;
	u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;

	if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
		reg |= SMR_VALID;
	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
}

static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
	u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
		  (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
		  (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;

	if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
	    smmu->smrs[idx].valid)
		reg |= S2CR_EXIDVALID;
	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
}

static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
{
	arm_smmu_write_s2cr(smmu, idx);
	if (smmu->smrs)
		arm_smmu_write_smr(smmu, idx);
}

/*
 * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
 * should be called after sCR0 is written.
 */
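/*
 * The probe is a straightforward write/read-back of SMR0: any ID or MASK
 * bits that are not implemented read back as zero, narrowing the masks
 * accordingly. For instance (hypothetical hardware), an SMMU implementing
 * only 10 stream ID bits would leave streamid_mask at 0x3ff.
 */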
static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 smr;

	if (!smmu->smrs)
		return;

	/*
	 * SMR.ID bits may not be preserved if the corresponding MASK
	 * bits are set, so check each one separately. We can reject
	 * masters later if they try to claim IDs outside these masks.
	 */
	smr = smmu->streamid_mask << SMR_ID_SHIFT;
	writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
	smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
	smmu->streamid_mask = smr >> SMR_ID_SHIFT;

	smr = smmu->streamid_mask << SMR_MASK_SHIFT;
	writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
	smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
	smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
}

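/*
 * A worked example with hypothetical values: given an existing valid entry
 * of id 0x400 / mask 0x0ff (matching IDs 0x400-0x4ff), a new request for
 * id 0x420 / mask 0x00f is entirely contained within it, since
 * (0x00f & 0x0ff) == 0x00f and ((0x420 ^ 0x400) & ~0x0ff) == 0, so that
 * entry is reused; a request for id 0x400 / mask 0x1ff (matching
 * 0x400-0x5ff) overlaps it without being contained, and is rejected.
 */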
static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
{
	struct arm_smmu_smr *smrs = smmu->smrs;
	int i, free_idx = -ENOSPC;

	/* Stream indexing is blissfully easy */
	if (!smrs)
		return id;

	/* Validating SMRs is... less so */
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		if (!smrs[i].valid) {
			/*
			 * Note the first free entry we come across, which
			 * we'll claim in the end if nothing else matches.
			 */
			if (free_idx < 0)
				free_idx = i;
			continue;
		}
		/*
		 * If the new entry is _entirely_ matched by an existing entry,
		 * then reuse that, with the guarantee that there also cannot
		 * be any subsequent conflicting entries. In normal use we'd
		 * expect simply identical entries for this case, but there's
		 * no harm in accommodating the generalisation.
		 */
		if ((mask & smrs[i].mask) == mask &&
		    !((id ^ smrs[i].id) & ~smrs[i].mask))
			return i;
		/*
		 * If the new entry has any other overlap with an existing one,
		 * though, then there always exists at least one stream ID
		 * which would cause a conflict, and we can't allow that risk.
		 */
		if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
			return -EINVAL;
	}

	return free_idx;
}

static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
{
	if (--smmu->s2crs[idx].count)
		return false;

	smmu->s2crs[idx] = s2cr_init_val;
	if (smmu->smrs)
		smmu->smrs[idx].valid = false;

	return true;
}

static int arm_smmu_master_alloc_smes(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
	struct arm_smmu_device *smmu = cfg->smmu;
	struct arm_smmu_smr *smrs = smmu->smrs;
	struct iommu_group *group;
	int i, idx, ret;

	mutex_lock(&smmu->stream_map_mutex);
	/* Figure out a viable stream map entry allocation */
	for_each_cfg_sme(fwspec, i, idx) {
		u16 sid = fwspec->ids[i];
		u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;

		if (idx != INVALID_SMENDX) {
			ret = -EEXIST;
			goto out_err;
		}

		ret = arm_smmu_find_sme(smmu, sid, mask);
		if (ret < 0)
			goto out_err;

		idx = ret;
		if (smrs && smmu->s2crs[idx].count == 0) {
			smrs[idx].id = sid;
			smrs[idx].mask = mask;
			smrs[idx].valid = true;
		}
		smmu->s2crs[idx].count++;
		cfg->smendx[i] = (s16)idx;
	}

	group = iommu_group_get_for_dev(dev);
	if (!group)
		group = ERR_PTR(-ENOMEM);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto out_err;
	}
	iommu_group_put(group);

	/* It worked! Now, poke the actual hardware */
	for_each_cfg_sme(fwspec, i, idx) {
		arm_smmu_write_sme(smmu, idx);
		smmu->s2crs[idx].group = group;
	}

	mutex_unlock(&smmu->stream_map_mutex);
	return 0;

out_err:
	while (i--) {
		arm_smmu_free_sme(smmu, cfg->smendx[i]);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
	return ret;
}

static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
{
	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
	int i, idx;

	mutex_lock(&smmu->stream_map_mutex);
	for_each_cfg_sme(fwspec, i, idx) {
		if (arm_smmu_free_sme(smmu, idx))
			arm_smmu_write_sme(smmu, idx);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
}

Will Deacon45ae7cf2013-06-24 18:31:25 +01001203static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Robin Murphyadfec2e2016-09-12 17:13:55 +01001204 struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001205{
Will Deacon44680ee2014-06-25 11:29:12 +01001206 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001207 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001208 u8 cbndx = smmu_domain->cfg.cbndx;
Will Deacon61bc6712017-01-06 16:56:03 +00001209 enum arm_smmu_s2cr_type type;
Robin Murphy588888a2016-09-12 17:13:54 +01001210 int i, idx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001211
Will Deacon61bc6712017-01-06 16:56:03 +00001212 if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
1213 type = S2CR_TYPE_BYPASS;
1214 else
1215 type = S2CR_TYPE_TRANS;
1216
Robin Murphyadfec2e2016-09-12 17:13:55 +01001217 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy8e8b2032016-09-12 17:13:50 +01001218 if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
Robin Murphy588888a2016-09-12 17:13:54 +01001219 continue;
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001220
Robin Murphy8e8b2032016-09-12 17:13:50 +01001221 s2cr[idx].type = type;
Sricharan Re1989802017-01-06 18:58:15 +05301222 s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001223 s2cr[idx].cbndx = cbndx;
1224 arm_smmu_write_s2cr(smmu, idx);
Will Deacon43b412b2014-07-15 11:22:24 +01001225 }
Robin Murphy8e8b2032016-09-12 17:13:50 +01001226 return 0;
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001227}
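/*
 * Note that only S2CRs whose type or context bank actually change are
 * rewritten above, so re-attaching a master to the same domain is a
 * no-op at the hardware level.
 */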
1228
Will Deacon45ae7cf2013-06-24 18:31:25 +01001229static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1230{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001231 int ret;
Robin Murphyadfec2e2016-09-12 17:13:55 +01001232 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
1233 struct arm_smmu_device *smmu;
Joerg Roedel1d672632015-03-26 13:43:10 +01001234 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001235
Robin Murphyadfec2e2016-09-12 17:13:55 +01001236 if (!fwspec || fwspec->ops != &arm_smmu_ops) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001237 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
1238 return -ENXIO;
1239 }
1240
Robin Murphyfba4f8e2016-10-17 12:06:21 +01001241 /*
1242 * FIXME: The arch/arm DMA API code tries to attach devices to its own
1243 * domains between of_xlate() and add_device() - we have no way to cope
1244 * with that, so until ARM gets converted to rely on groups and default
1245 * domains, just say no (but more politely than by dereferencing NULL).
1246 * This should be at least a WARN_ON once that's sorted.
1247 */
1248 if (!fwspec->iommu_priv)
1249 return -ENODEV;
1250
Robin Murphyadfec2e2016-09-12 17:13:55 +01001251 smmu = fwspec_smmu(fwspec);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301252
1253 ret = arm_smmu_rpm_get(smmu);
1254 if (ret < 0)
1255 return ret;
1256
Will Deacon518f7132014-11-14 17:17:54 +00001257 /* Ensure that the domain is finalised */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001258 ret = arm_smmu_init_domain_context(domain, smmu);
Arnd Bergmann287980e2016-05-27 23:23:25 +02001259 if (ret < 0)
Sricharan Rd4a44f02018-12-04 11:52:10 +05301260 goto rpm_put;
Will Deacon518f7132014-11-14 17:17:54 +00001261
Will Deacon45ae7cf2013-06-24 18:31:25 +01001262 /*
Will Deacon44680ee2014-06-25 11:29:12 +01001263 * Sanity check the domain. We don't support domains across
1264 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01001265 */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001266 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001267 dev_err(dev,
1268 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Robin Murphyadfec2e2016-09-12 17:13:55 +01001269 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
Sricharan Rd4a44f02018-12-04 11:52:10 +05301270 ret = -EINVAL;
1271 goto rpm_put;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001272 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001273
1274 /* Looks ok, so add the device to the domain */
Sricharan Rd4a44f02018-12-04 11:52:10 +05301275 ret = arm_smmu_domain_add_master(smmu_domain, fwspec);
1276
1277rpm_put:
1278 arm_smmu_rpm_put(smmu);
1279 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001280}
1281
Will Deacon45ae7cf2013-06-24 18:31:25 +01001282static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00001283 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001284{
Robin Murphy523d7422017-06-22 16:53:56 +01001285 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301286 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1287 int ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001288
Will Deacon518f7132014-11-14 17:17:54 +00001289 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001290 return -ENODEV;
1291
Sricharan Rd4a44f02018-12-04 11:52:10 +05301292 arm_smmu_rpm_get(smmu);
1293 ret = ops->map(ops, iova, paddr, size, prot);
1294 arm_smmu_rpm_put(smmu);
1295
1296 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001297}
1298
1299static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
1300 size_t size)
1301{
Robin Murphy523d7422017-06-22 16:53:56 +01001302 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301303 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1304 size_t ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001305
Will Deacon518f7132014-11-14 17:17:54 +00001306 if (!ops)
1307 return 0;
1308
Sricharan Rd4a44f02018-12-04 11:52:10 +05301309 arm_smmu_rpm_get(smmu);
1310 ret = ops->unmap(ops, iova, size);
1311 arm_smmu_rpm_put(smmu);
1312
1313 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001314}
1315
Robin Murphy44f68762018-09-20 17:10:27 +01001316static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
1317{
1318 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301319 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy44f68762018-09-20 17:10:27 +01001320
Sricharan Rd4a44f02018-12-04 11:52:10 +05301321 if (smmu_domain->tlb_ops) {
1322 arm_smmu_rpm_get(smmu);
Robin Murphy44f68762018-09-20 17:10:27 +01001323 smmu_domain->tlb_ops->tlb_flush_all(smmu_domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301324 arm_smmu_rpm_put(smmu);
1325 }
Robin Murphy44f68762018-09-20 17:10:27 +01001326}
1327
Robin Murphy32b12442017-09-28 15:55:01 +01001328static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
1329{
1330 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301331 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy32b12442017-09-28 15:55:01 +01001332
Sricharan Rd4a44f02018-12-04 11:52:10 +05301333 if (smmu_domain->tlb_ops) {
1334 arm_smmu_rpm_get(smmu);
Robin Murphy32b12442017-09-28 15:55:01 +01001335 smmu_domain->tlb_ops->tlb_sync(smmu_domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301336 arm_smmu_rpm_put(smmu);
1337 }
Robin Murphy32b12442017-09-28 15:55:01 +01001338}
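/*
 * Both TLB paths above bracket the maintenance operation with
 * arm_smmu_rpm_get()/arm_smmu_rpm_put(), since the TLB registers can
 * only be touched while the SMMU is powered up.
 */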
1339
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001340static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
1341 dma_addr_t iova)
1342{
Joerg Roedel1d672632015-03-26 13:43:10 +01001343 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001344 struct arm_smmu_device *smmu = smmu_domain->smmu;
1345 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1346 struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
1347 struct device *dev = smmu->dev;
1348 void __iomem *cb_base;
1349 u32 tmp;
1350 u64 phys;
Robin Murphy523d7422017-06-22 16:53:56 +01001351 unsigned long va, flags;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301352 int ret;
1353
1354 ret = arm_smmu_rpm_get(smmu);
1355 if (ret < 0)
1356 return 0;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001357
Robin Murphy452107c2017-03-30 17:56:30 +01001358 cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001359
Robin Murphy523d7422017-06-22 16:53:56 +01001360 spin_lock_irqsave(&smmu_domain->cb_lock, flags);
Robin Murphy661d9622015-05-27 17:09:34 +01001361 /* ATS1 registers can only be written atomically */
1362 va = iova & ~0xfffUL;
Robin Murphy661d9622015-05-27 17:09:34 +01001363 if (smmu->version == ARM_SMMU_V2)
Robin Murphyf9a05f02016-04-13 18:13:01 +01001364 smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
1365 else /* Register is only 32-bit in v1 */
Robin Murphy661d9622015-05-27 17:09:34 +01001366 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001367
1368 if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
1369 !(tmp & ATSR_ACTIVE), 5, 50)) {
Robin Murphy523d7422017-06-22 16:53:56 +01001370 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001371 dev_err(dev,
Fabio Estevam077124c2015-08-18 17:12:24 +01001372 "iova to phys timed out on %pad. Falling back to software table walk.\n",
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001373 &iova);
		/* Don't leak the runtime PM reference on the fallback path */
		arm_smmu_rpm_put(smmu);
 1374		return ops->iova_to_phys(ops, iova);
1375 }
1376
Robin Murphyf9a05f02016-04-13 18:13:01 +01001377 phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
Robin Murphy523d7422017-06-22 16:53:56 +01001378 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001379 if (phys & CB_PAR_F) {
1380 dev_err(dev, "translation fault!\n");
1381 dev_err(dev, "PAR = 0x%llx\n", phys);
		arm_smmu_rpm_put(smmu);
 1382		return 0;
1383 }
1384
Sricharan Rd4a44f02018-12-04 11:52:10 +05301385 arm_smmu_rpm_put(smmu);
1386
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001387 return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
1388}
1389
Will Deacon45ae7cf2013-06-24 18:31:25 +01001390static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001391 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001392{
Joerg Roedel1d672632015-03-26 13:43:10 +01001393 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Robin Murphy523d7422017-06-22 16:53:56 +01001394 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001395
Sunil Gouthambdf95922017-04-25 15:27:52 +05301396 if (domain->type == IOMMU_DOMAIN_IDENTITY)
1397 return iova;
1398
Will Deacon518f7132014-11-14 17:17:54 +00001399 if (!ops)
Will Deacona44a97912013-11-07 18:47:50 +00001400 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001401
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001402 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
Robin Murphy523d7422017-06-22 16:53:56 +01001403 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
1404 return arm_smmu_iova_to_phys_hard(domain, iova);
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001405
Robin Murphy523d7422017-06-22 16:53:56 +01001406 return ops->iova_to_phys(ops, iova);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001407}
1408
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001409static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001410{
Will Deacond0948942014-06-24 17:30:10 +01001411 switch (cap) {
1412 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001413 /*
1414 * Return true here as the SMMU can always send out coherent
1415 * requests.
1416 */
1417 return true;
Antonios Motakis0029a8d2014-10-13 14:06:18 +01001418 case IOMMU_CAP_NOEXEC:
1419 return true;
Will Deacond0948942014-06-24 17:30:10 +01001420 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001421 return false;
Will Deacond0948942014-06-24 17:30:10 +01001422 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001423}
Will Deacon45ae7cf2013-06-24 18:31:25 +01001424
Robin Murphy021bb842016-09-14 15:26:46 +01001425static int arm_smmu_match_node(struct device *dev, void *data)
1426{
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001427 return dev->fwnode == data;
Robin Murphy021bb842016-09-14 15:26:46 +01001428}
1429
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001430static
1431struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
Robin Murphy021bb842016-09-14 15:26:46 +01001432{
1433 struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001434 fwnode, arm_smmu_match_node);
Robin Murphy021bb842016-09-14 15:26:46 +01001435 put_device(dev);
1436 return dev ? dev_get_drvdata(dev) : NULL;
1437}
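/*
 * driver_find_device() takes a reference on the matched struct device;
 * put_device() drops it immediately, which is fine here because we only
 * need the drvdata pointer, not the device itself.
 */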
1438
Will Deacon03edb222015-01-19 14:27:33 +00001439static int arm_smmu_add_device(struct device *dev)
1440{
Robin Murphyadfec2e2016-09-12 17:13:55 +01001441 struct arm_smmu_device *smmu;
Robin Murphyf80cd882016-09-14 15:21:39 +01001442 struct arm_smmu_master_cfg *cfg;
Robin Murphy021bb842016-09-14 15:26:46 +01001443 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Robin Murphyf80cd882016-09-14 15:21:39 +01001444 int i, ret;
1445
Robin Murphy021bb842016-09-14 15:26:46 +01001446 if (using_legacy_binding) {
1447 ret = arm_smmu_register_legacy_master(dev, &smmu);
Artem Savkova7990c62017-08-08 12:26:02 +02001448
1449 /*
 1450	 * If dev->iommu_fwspec is initially NULL, arm_smmu_register_legacy_master()
1451 * will allocate/initialise a new one. Thus we need to update fwspec for
1452 * later use.
1453 */
1454 fwspec = dev->iommu_fwspec;
Robin Murphy021bb842016-09-14 15:26:46 +01001455 if (ret)
1456 goto out_free;
Robin Murphy3c117b52016-11-02 17:31:32 +00001457 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001458 smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
Robin Murphy021bb842016-09-14 15:26:46 +01001459 } else {
1460 return -ENODEV;
1461 }
Robin Murphyf80cd882016-09-14 15:21:39 +01001462
1463 ret = -EINVAL;
Robin Murphyadfec2e2016-09-12 17:13:55 +01001464 for (i = 0; i < fwspec->num_ids; i++) {
1465 u16 sid = fwspec->ids[i];
Robin Murphy021bb842016-09-14 15:26:46 +01001466 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
Robin Murphyf80cd882016-09-14 15:21:39 +01001467
Robin Murphyadfec2e2016-09-12 17:13:55 +01001468 if (sid & ~smmu->streamid_mask) {
Robin Murphyf80cd882016-09-14 15:21:39 +01001469 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
Robin Murphy021bb842016-09-14 15:26:46 +01001470 sid, smmu->streamid_mask);
1471 goto out_free;
1472 }
1473 if (mask & ~smmu->smr_mask_mask) {
1474 dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
Peng Fan6323f472017-04-21 17:03:36 +08001475 mask, smmu->smr_mask_mask);
Robin Murphyf80cd882016-09-14 15:21:39 +01001476 goto out_free;
1477 }
Robin Murphyf80cd882016-09-14 15:21:39 +01001478 }
Will Deacon03edb222015-01-19 14:27:33 +00001479
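	/*
	 * At this point i == fwspec->num_ids, so the offsetof() expression
	 * below sizes the trailing smendx[] array with one slot per stream ID.
	 */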
Robin Murphyadfec2e2016-09-12 17:13:55 +01001480 ret = -ENOMEM;
1481 cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
1482 GFP_KERNEL);
1483 if (!cfg)
1484 goto out_free;
1485
1486 cfg->smmu = smmu;
1487 fwspec->iommu_priv = cfg;
1488 while (i--)
1489 cfg->smendx[i] = INVALID_SMENDX;
1490
Sricharan Rd4a44f02018-12-04 11:52:10 +05301491 ret = arm_smmu_rpm_get(smmu);
1492 if (ret < 0)
1493 goto out_cfg_free;
1494
Robin Murphy588888a2016-09-12 17:13:54 +01001495 ret = arm_smmu_master_alloc_smes(dev);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301496 arm_smmu_rpm_put(smmu);
1497
Robin Murphyadfec2e2016-09-12 17:13:55 +01001498 if (ret)
Vivek Gautamc54451a2017-07-06 15:07:00 +05301499 goto out_cfg_free;
Robin Murphyadfec2e2016-09-12 17:13:55 +01001500
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001501 iommu_device_link(&smmu->iommu, dev);
1502
Robin Murphyadfec2e2016-09-12 17:13:55 +01001503 return 0;
Robin Murphyf80cd882016-09-14 15:21:39 +01001504
Vivek Gautamc54451a2017-07-06 15:07:00 +05301505out_cfg_free:
1506 kfree(cfg);
Robin Murphyf80cd882016-09-14 15:21:39 +01001507out_free:
Robin Murphyadfec2e2016-09-12 17:13:55 +01001508 iommu_fwspec_free(dev);
Robin Murphyf80cd882016-09-14 15:21:39 +01001509 return ret;
Will Deacon03edb222015-01-19 14:27:33 +00001510}
1511
Will Deacon45ae7cf2013-06-24 18:31:25 +01001512static void arm_smmu_remove_device(struct device *dev)
1513{
Robin Murphyadfec2e2016-09-12 17:13:55 +01001514 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001515 struct arm_smmu_master_cfg *cfg;
1516 struct arm_smmu_device *smmu;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301517 int ret;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001518
Robin Murphyadfec2e2016-09-12 17:13:55 +01001519 if (!fwspec || fwspec->ops != &arm_smmu_ops)
Robin Murphyf80cd882016-09-14 15:21:39 +01001520 return;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001521
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001522 cfg = fwspec->iommu_priv;
1523 smmu = cfg->smmu;
1524
Sricharan Rd4a44f02018-12-04 11:52:10 +05301525 ret = arm_smmu_rpm_get(smmu);
1526 if (ret < 0)
1527 return;
1528
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001529 iommu_device_unlink(&smmu->iommu, dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001530 arm_smmu_master_free_smes(fwspec);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301531
1532 arm_smmu_rpm_put(smmu);
1533
Antonios Motakis5fc63a72013-10-18 16:08:29 +01001534 iommu_group_remove_device(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001535 kfree(fwspec->iommu_priv);
1536 iommu_fwspec_free(dev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001537}
1538
Joerg Roedelaf659932015-10-21 23:51:41 +02001539static struct iommu_group *arm_smmu_device_group(struct device *dev)
1540{
Robin Murphyadfec2e2016-09-12 17:13:55 +01001541 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
1542 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
Robin Murphy588888a2016-09-12 17:13:54 +01001543 struct iommu_group *group = NULL;
1544 int i, idx;
1545
Robin Murphyadfec2e2016-09-12 17:13:55 +01001546 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy588888a2016-09-12 17:13:54 +01001547 if (group && smmu->s2crs[idx].group &&
1548 group != smmu->s2crs[idx].group)
1549 return ERR_PTR(-EINVAL);
1550
1551 group = smmu->s2crs[idx].group;
1552 }
1553
1554 if (group)
Robin Murphye1b44cb2016-11-11 17:59:22 +00001555 return iommu_group_ref_get(group);
Joerg Roedelaf659932015-10-21 23:51:41 +02001556
1557 if (dev_is_pci(dev))
1558 group = pci_device_group(dev);
Nipun Guptaeab03e22018-09-10 19:19:18 +05301559 else if (dev_is_fsl_mc(dev))
1560 group = fsl_mc_device_group(dev);
Joerg Roedelaf659932015-10-21 23:51:41 +02001561 else
1562 group = generic_device_group(dev);
1563
Joerg Roedelaf659932015-10-21 23:51:41 +02001564 return group;
1565}
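/*
 * All stream map entries of a master must resolve to a single group:
 * masters sharing any SME end up in the same group, and a mismatch
 * between already-assigned groups is reported as an error above.
 */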
1566
Will Deaconc752ce42014-06-25 22:46:31 +01001567static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
1568 enum iommu_attr attr, void *data)
1569{
Joerg Roedel1d672632015-03-26 13:43:10 +01001570 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001571
Robin Murphy44f68762018-09-20 17:10:27 +01001572	switch (domain->type) {
1573 case IOMMU_DOMAIN_UNMANAGED:
1574 switch (attr) {
1575 case DOMAIN_ATTR_NESTING:
1576 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
1577 return 0;
1578 default:
1579 return -ENODEV;
1580 }
1581 break;
1582 case IOMMU_DOMAIN_DMA:
1583 switch (attr) {
1584 case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
1585 *(int *)data = smmu_domain->non_strict;
1586 return 0;
1587 default:
1588 return -ENODEV;
1589 }
1590 break;
Will Deaconc752ce42014-06-25 22:46:31 +01001591 default:
Robin Murphy44f68762018-09-20 17:10:27 +01001592 return -EINVAL;
Will Deaconc752ce42014-06-25 22:46:31 +01001593 }
1594}
1595
1596static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
1597 enum iommu_attr attr, void *data)
1598{
Will Deacon518f7132014-11-14 17:17:54 +00001599 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01001600 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001601
Will Deacon518f7132014-11-14 17:17:54 +00001602 mutex_lock(&smmu_domain->init_mutex);
1603
Robin Murphy44f68762018-09-20 17:10:27 +01001604	switch (domain->type) {
1605 case IOMMU_DOMAIN_UNMANAGED:
1606 switch (attr) {
1607 case DOMAIN_ATTR_NESTING:
1608 if (smmu_domain->smmu) {
1609 ret = -EPERM;
1610 goto out_unlock;
1611 }
1612
1613 if (*(int *)data)
1614 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1615 else
1616 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1617 break;
1618 default:
1619 ret = -ENODEV;
Will Deacon518f7132014-11-14 17:17:54 +00001620 }
Robin Murphy44f68762018-09-20 17:10:27 +01001621 break;
1622 case IOMMU_DOMAIN_DMA:
1623 switch (attr) {
1624 case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
1625 smmu_domain->non_strict = *(int *)data;
1626 break;
1627 default:
1628 ret = -ENODEV;
1629 }
Will Deacon518f7132014-11-14 17:17:54 +00001630 break;
Will Deaconc752ce42014-06-25 22:46:31 +01001631 default:
Robin Murphy44f68762018-09-20 17:10:27 +01001632 ret = -EINVAL;
Will Deaconc752ce42014-06-25 22:46:31 +01001633 }
Will Deacon518f7132014-11-14 17:17:54 +00001634out_unlock:
1635 mutex_unlock(&smmu_domain->init_mutex);
1636 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01001637}
1638
Robin Murphy021bb842016-09-14 15:26:46 +01001639static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
1640{
Robin Murphy56fbf602017-03-31 12:03:33 +01001641 u32 mask, fwid = 0;
Robin Murphy021bb842016-09-14 15:26:46 +01001642
1643 if (args->args_count > 0)
1644 fwid |= (u16)args->args[0];
1645
1646 if (args->args_count > 1)
1647 fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
Robin Murphy56fbf602017-03-31 12:03:33 +01001648 else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
1649 fwid |= (u16)mask << SMR_MASK_SHIFT;
Robin Murphy021bb842016-09-14 15:26:46 +01001650
1651 return iommu_fwspec_add_ids(dev, &fwid, 1);
1652}
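/*
 * Example (illustrative, not taken from a real devicetree): a generic
 * binding such as
 *
 *	iommus = <&smmu 0x400 0x7f>;
 *
 * arrives here as args[0] = 0x400 (stream ID) and args[1] = 0x7f (SMR
 * mask), packing into fwid = (0x7f << SMR_MASK_SHIFT) | 0x400, i.e.
 * 0x007f0400 with the mask field above the 16-bit ID field.
 */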
1653
Eric Augerf3ebee82017-01-19 20:57:55 +00001654static void arm_smmu_get_resv_regions(struct device *dev,
1655 struct list_head *head)
1656{
1657 struct iommu_resv_region *region;
1658 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
1659
1660 region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
Robin Murphy9d3a4de2017-03-16 17:00:16 +00001661 prot, IOMMU_RESV_SW_MSI);
Eric Augerf3ebee82017-01-19 20:57:55 +00001662 if (!region)
1663 return;
1664
1665 list_add_tail(&region->list, head);
Robin Murphy273df962017-03-16 17:00:19 +00001666
1667 iommu_dma_get_resv_regions(dev, head);
Eric Augerf3ebee82017-01-19 20:57:55 +00001668}
1669
1670static void arm_smmu_put_resv_regions(struct device *dev,
1671 struct list_head *head)
1672{
1673 struct iommu_resv_region *entry, *next;
1674
1675 list_for_each_entry_safe(entry, next, head, list)
1676 kfree(entry);
1677}
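/*
 * The MSI_IOVA_BASE/MSI_IOVA_LENGTH window advertised above is a
 * software-managed MSI region (IOMMU_RESV_SW_MSI): an IOVA range set
 * aside for mapping MSI doorbells, not a hardware carve-out.
 */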
1678
Will Deacon518f7132014-11-14 17:17:54 +00001679static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01001680 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01001681 .domain_alloc = arm_smmu_domain_alloc,
1682 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01001683 .attach_dev = arm_smmu_attach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01001684 .map = arm_smmu_map,
1685 .unmap = arm_smmu_unmap,
Robin Murphy44f68762018-09-20 17:10:27 +01001686 .flush_iotlb_all = arm_smmu_flush_iotlb_all,
Robin Murphy32b12442017-09-28 15:55:01 +01001687 .iotlb_sync = arm_smmu_iotlb_sync,
Will Deaconc752ce42014-06-25 22:46:31 +01001688 .iova_to_phys = arm_smmu_iova_to_phys,
1689 .add_device = arm_smmu_add_device,
1690 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02001691 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01001692 .domain_get_attr = arm_smmu_domain_get_attr,
1693 .domain_set_attr = arm_smmu_domain_set_attr,
Robin Murphy021bb842016-09-14 15:26:46 +01001694 .of_xlate = arm_smmu_of_xlate,
Eric Augerf3ebee82017-01-19 20:57:55 +00001695 .get_resv_regions = arm_smmu_get_resv_regions,
1696 .put_resv_regions = arm_smmu_put_resv_regions,
Will Deacon518f7132014-11-14 17:17:54 +00001697 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001698};
1699
1700static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
1701{
1702 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001703 int i;
Peng Fan3ca37122016-05-03 21:50:30 +08001704 u32 reg, major;
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001705
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001706 /* clear global FSR */
1707 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
1708 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001709
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001710 /*
1711 * Reset stream mapping groups: Initial values mark all SMRn as
1712 * invalid and all S2CRn as bypass unless overridden.
1713 */
Robin Murphy8e8b2032016-09-12 17:13:50 +01001714 for (i = 0; i < smmu->num_mapping_groups; ++i)
1715 arm_smmu_write_sme(smmu, i);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001716
Nipun Gupta6eb18d42016-11-04 15:25:23 +05301717 if (smmu->model == ARM_MMU500) {
1718 /*
 1719		 * Before clearing ARM_MMU500_ACTLR_CPRE, we must first clear
 1720		 * the CACHE_LOCK bit of ACR; note that CACHE_LOCK is only
 1721		 * present in MMU-500 r2p0 onwards.
1722 */
1723 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
1724 major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
Peng Fan3ca37122016-05-03 21:50:30 +08001725 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
Nipun Gupta6eb18d42016-11-04 15:25:23 +05301726 if (major >= 2)
1727 reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
1728 /*
1729 * Allow unmatched Stream IDs to allocate bypass
1730 * TLB entries for reduced latency.
1731 */
Feng Kan74f55d32017-10-11 15:08:39 -07001732 reg |= ARM_MMU500_ACR_SMTNMB_TLBEN | ARM_MMU500_ACR_S2CRB_TLBEN;
Peng Fan3ca37122016-05-03 21:50:30 +08001733 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
1734 }
1735
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001736 /* Make sure all context banks are disabled and clear CB_FSR */
1737 for (i = 0; i < smmu->num_context_banks; ++i) {
Robin Murphy90df3732017-08-08 14:56:14 +01001738 void __iomem *cb_base = ARM_SMMU_CB(smmu, i);
1739
1740 arm_smmu_write_context_bank(smmu, i);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001741 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01001742 /*
1743 * Disable MMU-500's not-particularly-beneficial next-page
1744 * prefetcher for the sake of errata #841119 and #826419.
1745 */
1746 if (smmu->model == ARM_MMU500) {
1747 reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
1748 reg &= ~ARM_MMU500_ACTLR_CPRE;
1749 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
1750 }
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001751 }
Will Deacon1463fe42013-07-31 19:21:27 +01001752
Will Deacon45ae7cf2013-06-24 18:31:25 +01001753 /* Invalidate the TLB, just in case */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001754 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
1755 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
1756
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001757 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001758
Will Deacon45ae7cf2013-06-24 18:31:25 +01001759 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001760 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001761
1762 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001763 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001764
Robin Murphy25a1c962016-02-10 14:25:33 +00001765 /* Enable client access, handling unmatched streams as appropriate */
1766 reg &= ~sCR0_CLIENTPD;
1767 if (disable_bypass)
1768 reg |= sCR0_USFCFG;
1769 else
1770 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001771
1772 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001773 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001774
1775 /* Don't upgrade barriers */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001776 reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001777
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001778 if (smmu->features & ARM_SMMU_FEAT_VMID16)
1779 reg |= sCR0_VMID16EN;
1780
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001781 if (smmu->features & ARM_SMMU_FEAT_EXIDS)
1782 reg |= sCR0_EXIDENABLE;
1783
Will Deacon45ae7cf2013-06-24 18:31:25 +01001784 /* Push the button */
Robin Murphy11febfc2017-03-30 17:56:31 +01001785 arm_smmu_tlb_sync_global(smmu);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001786 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001787}
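/*
 * Ordering in arm_smmu_device_reset() matters: stream mapping, context
 * banks and TLBs are all put into a known state first, and sCR0 is only
 * written to bring the SMMU live after a global TLB sync.
 */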
1788
1789static int arm_smmu_id_size_to_bits(int size)
1790{
1791 switch (size) {
1792 case 0:
1793 return 32;
1794 case 1:
1795 return 36;
1796 case 2:
1797 return 40;
1798 case 3:
1799 return 42;
1800 case 4:
1801 return 44;
1802 case 5:
1803 default:
1804 return 48;
1805 }
1806}
1807
1808static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1809{
1810 unsigned long size;
1811 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1812 u32 id;
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001813 bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001814 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001815
1816 dev_notice(smmu->dev, "probing hardware configuration...\n");
Robin Murphyb7862e32016-04-13 18:13:03 +01001817 dev_notice(smmu->dev, "SMMUv%d with:\n",
1818 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001819
1820 /* ID0 */
1821 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01001822
1823 /* Restrict available stages based on module parameter */
1824 if (force_stage == 1)
1825 id &= ~(ID0_S2TS | ID0_NTS);
1826 else if (force_stage == 2)
1827 id &= ~(ID0_S1TS | ID0_NTS);
1828
Will Deacon45ae7cf2013-06-24 18:31:25 +01001829 if (id & ID0_S1TS) {
1830 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
1831 dev_notice(smmu->dev, "\tstage 1 translation\n");
1832 }
1833
1834 if (id & ID0_S2TS) {
1835 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
1836 dev_notice(smmu->dev, "\tstage 2 translation\n");
1837 }
1838
1839 if (id & ID0_NTS) {
1840 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
1841 dev_notice(smmu->dev, "\tnested translation\n");
1842 }
1843
1844 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01001845 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001846 dev_err(smmu->dev, "\tno translation support!\n");
1847 return -ENODEV;
1848 }
1849
Robin Murphyb7862e32016-04-13 18:13:03 +01001850 if ((id & ID0_S1TS) &&
1851 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001852 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
1853 dev_notice(smmu->dev, "\taddress translation ops\n");
1854 }
1855
Robin Murphybae2c2d2015-07-29 19:46:05 +01001856 /*
1857 * In order for DMA API calls to work properly, we must defer to what
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001858 * the FW says about coherency, regardless of what the hardware claims.
Robin Murphybae2c2d2015-07-29 19:46:05 +01001859 * Fortunately, this also opens up a workaround for systems where the
1860 * ID register value has ended up configured incorrectly.
1861 */
Robin Murphybae2c2d2015-07-29 19:46:05 +01001862 cttw_reg = !!(id & ID0_CTTW);
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001863 if (cttw_fw || cttw_reg)
Robin Murphybae2c2d2015-07-29 19:46:05 +01001864 dev_notice(smmu->dev, "\t%scoherent table walk\n",
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001865 cttw_fw ? "" : "non-");
1866 if (cttw_fw != cttw_reg)
Robin Murphybae2c2d2015-07-29 19:46:05 +01001867 dev_notice(smmu->dev,
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001868 "\t(IDR0.CTTW overridden by FW configuration)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01001869
Robin Murphy21174242016-09-12 17:13:48 +01001870 /* Max. number of entries we have for stream matching/indexing */
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001871 if (smmu->version == ARM_SMMU_V2 && id & ID0_EXIDS) {
1872 smmu->features |= ARM_SMMU_FEAT_EXIDS;
1873 size = 1 << 16;
1874 } else {
1875 size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
1876 }
Robin Murphy21174242016-09-12 17:13:48 +01001877 smmu->streamid_mask = size - 1;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001878 if (id & ID0_SMS) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001879 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
Robin Murphy21174242016-09-12 17:13:48 +01001880 size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
1881 if (size == 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001882 dev_err(smmu->dev,
1883 "stream-matching supported, but no SMRs present!\n");
1884 return -ENODEV;
1885 }
1886
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001887 /* Zero-initialised to mark as invalid */
1888 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
1889 GFP_KERNEL);
1890 if (!smmu->smrs)
1891 return -ENOMEM;
1892
Will Deacon45ae7cf2013-06-24 18:31:25 +01001893 dev_notice(smmu->dev,
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001894		   "\tstream matching with %lu register groups\n", size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001895 }
Robin Murphy8e8b2032016-09-12 17:13:50 +01001896 /* s2cr->type == 0 means translation, so initialise explicitly */
1897 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
1898 GFP_KERNEL);
1899 if (!smmu->s2crs)
1900 return -ENOMEM;
1901 for (i = 0; i < size; i++)
1902 smmu->s2crs[i] = s2cr_init_val;
1903
Robin Murphy21174242016-09-12 17:13:48 +01001904 smmu->num_mapping_groups = size;
Robin Murphy588888a2016-09-12 17:13:54 +01001905 mutex_init(&smmu->stream_map_mutex);
Will Deacon8e517e72017-07-06 15:55:48 +01001906 spin_lock_init(&smmu->global_sync_lock);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001907
Robin Murphy7602b872016-04-28 17:12:09 +01001908 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
1909 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
1910 if (!(id & ID0_PTFS_NO_AARCH32S))
1911 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
1912 }
1913
Will Deacon45ae7cf2013-06-24 18:31:25 +01001914 /* ID1 */
1915 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01001916 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001917
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01001918 /* Check for size mismatch of SMMU address space from mapped region */
Will Deacon518f7132014-11-14 17:17:54 +00001919 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
Robin Murphy452107c2017-03-30 17:56:30 +01001920 size <<= smmu->pgshift;
1921 if (smmu->cb_base != gr0_base + size)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001922 dev_warn(smmu->dev,
Robin Murphy452107c2017-03-30 17:56:30 +01001923 "SMMU address space size (0x%lx) differs from mapped region size (0x%tx)!\n",
1924 size * 2, (smmu->cb_base - gr0_base) * 2);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001925
Will Deacon518f7132014-11-14 17:17:54 +00001926 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001927 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
1928 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
1929 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
1930 return -ENODEV;
1931 }
1932 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
1933 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphye086d912016-04-13 18:12:58 +01001934 /*
1935 * Cavium CN88xx erratum #27704.
1936 * Ensure ASID and VMID allocation is unique across all SMMUs in
1937 * the system.
1938 */
1939 if (smmu->model == CAVIUM_SMMUV2) {
1940 smmu->cavium_id_base =
1941 atomic_add_return(smmu->num_context_banks,
1942 &cavium_smmu_context_count);
1943 smmu->cavium_id_base -= smmu->num_context_banks;
Robert Richter53c35dce2017-03-13 11:39:01 +01001944 dev_notice(smmu->dev, "\tenabling workaround for Cavium erratum 27704\n");
Robin Murphye086d912016-04-13 18:12:58 +01001945 }
Robin Murphy90df3732017-08-08 14:56:14 +01001946 smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks,
1947 sizeof(*smmu->cbs), GFP_KERNEL);
1948 if (!smmu->cbs)
1949 return -ENOMEM;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001950
1951 /* ID2 */
1952 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
1953 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00001954 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001955
Will Deacon518f7132014-11-14 17:17:54 +00001956 /* The output mask is also applied for bypass */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001957 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00001958 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001959
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001960 if (id & ID2_VMID16)
1961 smmu->features |= ARM_SMMU_FEAT_VMID16;
1962
Robin Murphyf1d84542015-03-04 16:41:05 +00001963 /*
1964 * What the page table walker can address actually depends on which
1965 * descriptor format is in use, but since a) we don't know that yet,
1966 * and b) it can vary per context bank, this will have to do...
1967 */
1968 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
1969 dev_warn(smmu->dev,
1970 "failed to set DMA mask for table walker\n");
1971
Robin Murphyb7862e32016-04-13 18:13:03 +01001972 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00001973 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01001974 if (smmu->version == ARM_SMMU_V1_64K)
1975 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001976 } else {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001977 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
Will Deacon518f7132014-11-14 17:17:54 +00001978 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00001979 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01001980 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00001981 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01001982 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00001983 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01001984 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001985 }
1986
Robin Murphy7602b872016-04-28 17:12:09 +01001987 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01001988 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01001989 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01001990 if (smmu->features &
1991 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01001992 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01001993 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01001994 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01001995 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01001996 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01001997
Robin Murphyd5466352016-05-09 17:20:09 +01001998 if (arm_smmu_ops.pgsize_bitmap == -1UL)
1999 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
2000 else
2001 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
2002 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
2003 smmu->pgsize_bitmap);
 2004
Will Deacon28d60072014-09-01 16:24:48 +01002006 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
2007 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
Will Deacon518f7132014-11-14 17:17:54 +00002008 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01002009
2010 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
2011 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
Will Deacon518f7132014-11-14 17:17:54 +00002012 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01002013
Will Deacon45ae7cf2013-06-24 18:31:25 +01002014 return 0;
2015}
2016
Robin Murphy67b65a32016-04-13 18:12:57 +01002017struct arm_smmu_match_data {
2018 enum arm_smmu_arch_version version;
2019 enum arm_smmu_implementation model;
2020};
2021
2022#define ARM_SMMU_MATCH_DATA(name, ver, imp) \
Sricharan R96a299d2018-12-04 11:52:09 +05302023static const struct arm_smmu_match_data name = { .version = ver, .model = imp }
Robin Murphy67b65a32016-04-13 18:12:57 +01002024
2025ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
2026ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
Robin Murphyb7862e32016-04-13 18:13:03 +01002027ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01002028ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
Robin Murphye086d912016-04-13 18:12:58 +01002029ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
Robin Murphy67b65a32016-04-13 18:12:57 +01002030
Joerg Roedel09b52692014-10-02 12:24:45 +02002031static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01002032 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
2033 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
2034 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01002035 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01002036 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01002037 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Robin Murphy09360402014-08-28 17:51:59 +01002038 { },
2039};
2040MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
2041
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002042#ifdef CONFIG_ACPI
2043static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
2044{
2045 int ret = 0;
2046
2047 switch (model) {
2048 case ACPI_IORT_SMMU_V1:
2049 case ACPI_IORT_SMMU_CORELINK_MMU400:
2050 smmu->version = ARM_SMMU_V1;
2051 smmu->model = GENERIC_SMMU;
2052 break;
Robin Murphy84c24372017-06-19 16:41:56 +01002053 case ACPI_IORT_SMMU_CORELINK_MMU401:
2054 smmu->version = ARM_SMMU_V1_64K;
2055 smmu->model = GENERIC_SMMU;
2056 break;
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002057 case ACPI_IORT_SMMU_V2:
2058 smmu->version = ARM_SMMU_V2;
2059 smmu->model = GENERIC_SMMU;
2060 break;
2061 case ACPI_IORT_SMMU_CORELINK_MMU500:
2062 smmu->version = ARM_SMMU_V2;
2063 smmu->model = ARM_MMU500;
2064 break;
Robin Murphy84c24372017-06-19 16:41:56 +01002065 case ACPI_IORT_SMMU_CAVIUM_THUNDERX:
2066 smmu->version = ARM_SMMU_V2;
2067 smmu->model = CAVIUM_SMMUV2;
2068 break;
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002069 default:
2070 ret = -ENODEV;
2071 }
2072
2073 return ret;
2074}
2075
2076static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
2077 struct arm_smmu_device *smmu)
2078{
2079 struct device *dev = smmu->dev;
2080 struct acpi_iort_node *node =
2081 *(struct acpi_iort_node **)dev_get_platdata(dev);
2082 struct acpi_iort_smmu *iort_smmu;
2083 int ret;
2084
2085 /* Retrieve SMMU1/2 specific data */
2086 iort_smmu = (struct acpi_iort_smmu *)node->node_data;
2087
2088 ret = acpi_smmu_get_data(iort_smmu->model, smmu);
2089 if (ret < 0)
2090 return ret;
2091
2092 /* Ignore the configuration access interrupt */
2093 smmu->num_global_irqs = 1;
2094
2095 if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
2096 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
2097
2098 return 0;
2099}
2100#else
2101static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
2102 struct arm_smmu_device *smmu)
2103{
2104 return -ENODEV;
2105}
2106#endif
2107
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002108static int arm_smmu_device_dt_probe(struct platform_device *pdev,
2109 struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002110{
Robin Murphy67b65a32016-04-13 18:12:57 +01002111 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002112 struct device *dev = &pdev->dev;
Robin Murphy021bb842016-09-14 15:26:46 +01002113 bool legacy_binding;
2114
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002115 if (of_property_read_u32(dev->of_node, "#global-interrupts",
2116 &smmu->num_global_irqs)) {
2117 dev_err(dev, "missing #global-interrupts property\n");
2118 return -ENODEV;
2119 }
2120
2121 data = of_device_get_match_data(dev);
2122 smmu->version = data->version;
2123 smmu->model = data->model;
2124
2125 parse_driver_options(smmu);
2126
Robin Murphy021bb842016-09-14 15:26:46 +01002127 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
2128 if (legacy_binding && !using_generic_binding) {
2129 if (!using_legacy_binding)
2130 pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
2131 using_legacy_binding = true;
2132 } else if (!legacy_binding && !using_legacy_binding) {
2133 using_generic_binding = true;
2134 } else {
2135 dev_err(dev, "not probing due to mismatched DT properties\n");
2136 return -ENODEV;
2137 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002138
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002139 if (of_dma_is_coherent(dev->of_node))
2140 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
2141
2142 return 0;
2143}
2144
Robin Murphyf6810c12017-04-10 16:51:05 +05302145static void arm_smmu_bus_init(void)
2146{
2147 /* Oh, for a proper bus abstraction */
2148 if (!iommu_present(&platform_bus_type))
2149 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
2150#ifdef CONFIG_ARM_AMBA
2151 if (!iommu_present(&amba_bustype))
2152 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
2153#endif
2154#ifdef CONFIG_PCI
2155 if (!iommu_present(&pci_bus_type)) {
2156 pci_request_acs();
2157 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
2158 }
2159#endif
Nipun Guptaeab03e22018-09-10 19:19:18 +05302160#ifdef CONFIG_FSL_MC_BUS
2161 if (!iommu_present(&fsl_mc_bus_type))
2162 bus_set_iommu(&fsl_mc_bus_type, &arm_smmu_ops);
2163#endif
Robin Murphyf6810c12017-04-10 16:51:05 +05302164}
2165
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002166static int arm_smmu_device_probe(struct platform_device *pdev)
2167{
2168 struct resource *res;
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002169 resource_size_t ioaddr;
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002170 struct arm_smmu_device *smmu;
2171 struct device *dev = &pdev->dev;
2172 int num_irqs, i, err;
2173
Will Deacon45ae7cf2013-06-24 18:31:25 +01002174 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
2175 if (!smmu) {
2176 dev_err(dev, "failed to allocate arm_smmu_device\n");
2177 return -ENOMEM;
2178 }
2179 smmu->dev = dev;
2180
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002181 if (dev->of_node)
2182 err = arm_smmu_device_dt_probe(pdev, smmu);
2183 else
2184 err = arm_smmu_device_acpi_probe(pdev, smmu);
2185
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002186 if (err)
2187 return err;
Robin Murphy09360402014-08-28 17:51:59 +01002188
Will Deacon45ae7cf2013-06-24 18:31:25 +01002189	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Julia Lawall8a7f4312013-08-19 12:20:37 +01002191	smmu->base = devm_ioremap_resource(dev, res);
 2192	if (IS_ERR(smmu->base))
 2193		return PTR_ERR(smmu->base);
	/* Only dereference res once devm_ioremap_resource() has validated it */
	ioaddr = res->start;
Robin Murphy452107c2017-03-30 17:56:30 +01002194 smmu->cb_base = smmu->base + resource_size(res) / 2;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002195
Will Deacon45ae7cf2013-06-24 18:31:25 +01002196 num_irqs = 0;
2197 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
2198 num_irqs++;
2199 if (num_irqs > smmu->num_global_irqs)
2200 smmu->num_context_irqs++;
2201 }
2202
Andreas Herrmann44a08de2013-10-01 13:39:07 +01002203 if (!smmu->num_context_irqs) {
2204 dev_err(dev, "found %d interrupts but expected at least %d\n",
2205 num_irqs, smmu->num_global_irqs + 1);
2206 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002207 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002208
Kees Cooka86854d2018-06-12 14:07:58 -07002209 smmu->irqs = devm_kcalloc(dev, num_irqs, sizeof(*smmu->irqs),
Will Deacon45ae7cf2013-06-24 18:31:25 +01002210 GFP_KERNEL);
2211 if (!smmu->irqs) {
2212 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
2213 return -ENOMEM;
2214 }
2215
2216 for (i = 0; i < num_irqs; ++i) {
2217 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07002218
Will Deacon45ae7cf2013-06-24 18:31:25 +01002219 if (irq < 0) {
2220 dev_err(dev, "failed to get irq index %d\n", i);
2221 return -ENODEV;
2222 }
2223 smmu->irqs[i] = irq;
2224 }
2225
Sricharan R96a299d2018-12-04 11:52:09 +05302226 err = devm_clk_bulk_get_all(dev, &smmu->clks);
2227 if (err < 0) {
2228 dev_err(dev, "failed to get clocks %d\n", err);
2229 return err;
2230 }
2231 smmu->num_clks = err;
2232
2233 err = clk_bulk_prepare_enable(smmu->num_clks, smmu->clks);
2234 if (err)
2235 return err;
2236
Olav Haugan3c8766d2014-08-22 17:12:32 -07002237 err = arm_smmu_device_cfg_probe(smmu);
2238 if (err)
2239 return err;
2240
Vivek Gautamd1e20222018-07-19 23:23:56 +05302241 if (smmu->version == ARM_SMMU_V2) {
2242 if (smmu->num_context_banks > smmu->num_context_irqs) {
2243 dev_err(dev,
2244 "found only %d context irq(s) but %d required\n",
2245 smmu->num_context_irqs, smmu->num_context_banks);
2246 return -ENODEV;
2247 }
2248
2249 /* Ignore superfluous interrupts */
2250 smmu->num_context_irqs = smmu->num_context_banks;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002251 }
2252
Will Deacon45ae7cf2013-06-24 18:31:25 +01002253 for (i = 0; i < smmu->num_global_irqs; ++i) {
Peng Fanbee14002016-07-04 17:38:22 +08002254 err = devm_request_irq(smmu->dev, smmu->irqs[i],
2255 arm_smmu_global_fault,
2256 IRQF_SHARED,
2257 "arm-smmu global fault",
2258 smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002259 if (err) {
2260 dev_err(dev, "failed to request global IRQ %d (%u)\n",
2261 i, smmu->irqs[i]);
Robin Murphyf80cd882016-09-14 15:21:39 +01002262 return err;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002263 }
2264 }
2265
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002266 err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
2267 "smmu.%pa", &ioaddr);
2268 if (err) {
2269 dev_err(dev, "Failed to register iommu in sysfs\n");
2270 return err;
2271 }
2272
2273 iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
2274 iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
2275
2276 err = iommu_device_register(&smmu->iommu);
2277 if (err) {
2278 dev_err(dev, "Failed to register iommu\n");
2279 return err;
2280 }
2281
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002282 platform_set_drvdata(pdev, smmu);
Will Deaconfd90cec2013-08-21 13:56:34 +01002283 arm_smmu_device_reset(smmu);
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03002284 arm_smmu_test_smr_masks(smmu);
Robin Murphy021bb842016-09-14 15:26:46 +01002285
Robin Murphyf6810c12017-04-10 16:51:05 +05302286 /*
Sricharan Rd4a44f02018-12-04 11:52:10 +05302287 * We want to avoid touching dev->power.lock in fastpaths unless
2288 * it's really going to do something useful - pm_runtime_enabled()
2289 * can serve as an ideal proxy for that decision. So, conditionally
2290 * enable pm_runtime.
2291 */
2292 if (dev->pm_domain) {
2293 pm_runtime_set_active(dev);
2294 pm_runtime_enable(dev);
2295 }
2296
2297 /*
Robin Murphyf6810c12017-04-10 16:51:05 +05302298 * For ACPI and generic DT bindings, an SMMU will be probed before
2299 * any device which might need it, so we want the bus ops in place
2300 * ready to handle default domain setup as soon as any SMMU exists.
2301 */
2302 if (!using_legacy_binding)
2303 arm_smmu_bus_init();
2304
Will Deacon45ae7cf2013-06-24 18:31:25 +01002305 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002306}
2307
Robin Murphyf6810c12017-04-10 16:51:05 +05302308/*
2309 * With the legacy DT binding in play, though, we have no guarantees about
2310 * probe order, but then we're also not doing default domains, so we can
2311 * delay setting bus ops until we're sure every possible SMMU is ready,
2312 * and that way ensure that no add_device() calls get missed.
2313 */
2314static int arm_smmu_legacy_bus_init(void)
2315{
2316 if (using_legacy_binding)
2317 arm_smmu_bus_init();
2318 return 0;
2319}
2320device_initcall_sync(arm_smmu_legacy_bus_init);
2321
Will Deacon45ae7cf2013-06-24 18:31:25 +01002322static int arm_smmu_device_remove(struct platform_device *pdev)
2323{
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002324 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002325
2326 if (!smmu)
2327 return -ENODEV;
2328
Will Deaconecfadb62013-07-31 19:21:28 +01002329 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002330 dev_err(&pdev->dev, "removing device with active domains!\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01002331
Sricharan Rd4a44f02018-12-04 11:52:10 +05302332 arm_smmu_rpm_get(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002333 /* Turn the thing off */
Mitchel Humpherys29073202014-07-08 09:52:18 -07002334 writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Sricharan Rd4a44f02018-12-04 11:52:10 +05302335 arm_smmu_rpm_put(smmu);
Sricharan R96a299d2018-12-04 11:52:09 +05302336
Sricharan Rd4a44f02018-12-04 11:52:10 +05302337 if (pm_runtime_enabled(smmu->dev))
2338 pm_runtime_force_suspend(smmu->dev);
2339 else
2340 clk_bulk_disable(smmu->num_clks, smmu->clks);
2341
2342 clk_bulk_unprepare(smmu->num_clks, smmu->clks);
Sricharan R96a299d2018-12-04 11:52:09 +05302343
Will Deacon45ae7cf2013-06-24 18:31:25 +01002344 return 0;
2345}
2346
Nate Watterson7aa86192017-06-29 18:18:15 -04002347static void arm_smmu_device_shutdown(struct platform_device *pdev)
2348{
2349 arm_smmu_device_remove(pdev);
2350}
2351
Sricharan R96a299d2018-12-04 11:52:09 +05302352static int __maybe_unused arm_smmu_runtime_resume(struct device *dev)
Robin Murphya2d866f2017-08-08 14:56:15 +01002353{
2354 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
Sricharan R96a299d2018-12-04 11:52:09 +05302355 int ret;
2356
2357 ret = clk_bulk_enable(smmu->num_clks, smmu->clks);
2358 if (ret)
2359 return ret;
Robin Murphya2d866f2017-08-08 14:56:15 +01002360
2361 arm_smmu_device_reset(smmu);
Sricharan R96a299d2018-12-04 11:52:09 +05302362
Robin Murphya2d866f2017-08-08 14:56:15 +01002363 return 0;
2364}
2365
Sricharan R96a299d2018-12-04 11:52:09 +05302366static int __maybe_unused arm_smmu_runtime_suspend(struct device *dev)
2367{
2368 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
2369
2370 clk_bulk_disable(smmu->num_clks, smmu->clks);
2371
2372 return 0;
2373}
2374
2375static int __maybe_unused arm_smmu_pm_resume(struct device *dev)
2376{
2377 if (pm_runtime_suspended(dev))
2378 return 0;
2379
2380 return arm_smmu_runtime_resume(dev);
2381}
2382
2383static int __maybe_unused arm_smmu_pm_suspend(struct device *dev)
2384{
2385 if (pm_runtime_suspended(dev))
2386 return 0;
2387
2388 return arm_smmu_runtime_suspend(dev);
2389}
2390
2391static const struct dev_pm_ops arm_smmu_pm_ops = {
2392 SET_SYSTEM_SLEEP_PM_OPS(arm_smmu_pm_suspend, arm_smmu_pm_resume)
2393 SET_RUNTIME_PM_OPS(arm_smmu_runtime_suspend,
2394 arm_smmu_runtime_resume, NULL)
2395};
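/*
 * System sleep reuses the runtime PM callbacks: if the SMMU is already
 * runtime-suspended its clocks are off and there is nothing further to
 * save or restore, hence the pm_runtime_suspended() early-outs above.
 */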
Robin Murphya2d866f2017-08-08 14:56:15 +01002396
Will Deacon45ae7cf2013-06-24 18:31:25 +01002397static struct platform_driver arm_smmu_driver = {
2398 .driver = {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002399 .name = "arm-smmu",
2400 .of_match_table = of_match_ptr(arm_smmu_of_match),
Robin Murphya2d866f2017-08-08 14:56:15 +01002401 .pm = &arm_smmu_pm_ops,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002402 },
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002403 .probe = arm_smmu_device_probe,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002404 .remove = arm_smmu_device_remove,
Nate Watterson7aa86192017-06-29 18:18:15 -04002405 .shutdown = arm_smmu_device_shutdown,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002406};
Robin Murphyf6810c12017-04-10 16:51:05 +05302407module_platform_driver(arm_smmu_driver);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002408
Will Deacon45ae7cf2013-06-24 18:31:25 +01002409MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
2410MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
2411MODULE_LICENSE("GPL v2");