// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 *	- Extended Stream ID (16 bit)
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>
#include <linux/fsl/mc.h>

#include "arm-smmu-regs.h"

/*
 * Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU
 * global register space are still, in fact, using a hypervisor to mediate it
 * by trapping and emulating register accesses. Sadly, some deployed versions
 * of said trapping code have bugs wherein they go horribly wrong for stores
 * using r31 (i.e. XZR/WZR) as the source register.
 */
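/*
 * Using a non-zero dummy value for don't-care register writes (such as the
 * TLB sync triggers below) keeps the compiler from using XZR/WZR as the store
 * source and tripping over the erratum described above.
 */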
#define QCOM_DUMMY_VAL -1

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)
#define ARM_MMU500_ACR_S2CRB_TLBEN	(1 << 10)
#define ARM_MMU500_ACR_SMTNMB_TLBEN	(1 << 8)

#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */
#define TLB_SPIN_COUNT			10

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

#define MSI_IOVA_BASE			0x8000000
#define MSI_IOVA_LENGTH			0x100000

static int force_stage;
/*
 * not really modular, but the easiest way to keep compat with existing
 * bootargs behaviour is to continue using module_param() here.
 */
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass =
	IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT);
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
	QCOM_SMMUV2,
};

struct arm_smmu_s2cr {
	struct iommu_group		*group;
	int				count;
	enum arm_smmu_s2cr_type		type;
	enum arm_smmu_s2cr_privcfg	privcfg;
	u8				cbndx;
};

#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
}

struct arm_smmu_smr {
	u16				mask;
	u16				id;
	bool				valid;
};

struct arm_smmu_cb {
	u64				ttbr[2];
	u32				tcr[2];
	u32				mair[2];
	struct arm_smmu_cfg		*cfg;
};

struct arm_smmu_master_cfg {
	struct arm_smmu_device		*smmu;
	s16				smendx[];
};
#define INVALID_SMENDX			-1
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
#define fwspec_smendx(fw, i) \
	(i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
#define for_each_cfg_sme(fw, i, idx) \
	for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)
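/*
 * The for_each_cfg_sme() iterator above walks every stream ID in a device's
 * fwspec, yielding both the fwspec index (i) and the corresponding stream map
 * entry index (idx). The comma expression assigns idx before the bound check,
 * so idx reads as INVALID_SMENDX rather than stale data once i runs past
 * fw->num_ids.
 */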

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned int			numpage;
	unsigned int			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
#define ARM_SMMU_FEAT_EXIDS		(1 << 12)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	struct arm_smmu_cb		*cbs;
	atomic_t			irptndx;

	u32				num_mapping_groups;
	u16				streamid_mask;
	u16				smr_mask_mask;
	struct arm_smmu_smr		*smrs;
	struct arm_smmu_s2cr		*s2crs;
	struct mutex			stream_map_mutex;

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;
	struct clk_bulk_data		*clks;
	int				num_clks;

	u32				cavium_id_base; /* Specific to Cavium */

	spinlock_t			global_sync_lock;

	/* IOMMU core code handle */
	struct iommu_device		iommu;
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	union {
		u16			asid;
		u16			vmid;
	};
	enum arm_smmu_cbar_type		cbar;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
	ARM_SMMU_DOMAIN_BYPASS,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	const struct iommu_gather_ops	*tlb_ops;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	bool				non_strict;
	struct mutex			init_mutex; /* Protects smmu pointer */
	spinlock_t			cb_lock; /* Serialises ATS1* ops and TLB syncs */
	struct iommu_domain		domain;
};

static int arm_smmu_gr0_ns(int offset)
{
	switch(offset) {
	case ARM_SMMU_GR0_sCR0:
	case ARM_SMMU_GR0_sACR:
	case ARM_SMMU_GR0_sGFSR:
	case ARM_SMMU_GR0_sGFSYNR0:
	case ARM_SMMU_GR0_sGFSYNR1:
	case ARM_SMMU_GR0_sGFSYNR2:
		return offset + 0x400;
	default:
		return offset;
	}
}
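/*
 * On platforms with the "calxeda,smmu-secure-config-access" quirk, the global
 * registers listed above must be accessed through their banked aliases 0x400
 * bytes into the register space; arm_smmu_gr0_ns() applies that fixup and
 * leaves every other GR0 offset untouched.
 */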

static void __iomem *arm_smmu_page(struct arm_smmu_device *smmu, int n)
{
	return smmu->base + (n << smmu->pgshift);
}

static u32 arm_smmu_readl(struct arm_smmu_device *smmu, int page, int offset)
{
	if ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) && page == 0)
		offset = arm_smmu_gr0_ns(offset);

	return readl_relaxed(arm_smmu_page(smmu, page) + offset);
}

static void arm_smmu_writel(struct arm_smmu_device *smmu, int page, int offset,
			    u32 val)
{
	if ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) && page == 0)
		offset = arm_smmu_gr0_ns(offset);

	writel_relaxed(val, arm_smmu_page(smmu, page) + offset);
}

static u64 arm_smmu_readq(struct arm_smmu_device *smmu, int page, int offset)
{
	return readq_relaxed(arm_smmu_page(smmu, page) + offset);
}

static void arm_smmu_writeq(struct arm_smmu_device *smmu, int page, int offset,
			    u64 val)
{
	writeq_relaxed(val, arm_smmu_page(smmu, page) + offset);
}

#define ARM_SMMU_GR0		0
#define ARM_SMMU_GR1		1
#define ARM_SMMU_CB(s, n)	((s)->numpage + (n))

#define arm_smmu_gr0_read(s, o)		\
	arm_smmu_readl((s), ARM_SMMU_GR0, (o))
#define arm_smmu_gr0_write(s, o, v)	\
	arm_smmu_writel((s), ARM_SMMU_GR0, (o), (v))

#define arm_smmu_gr1_read(s, o)		\
	arm_smmu_readl((s), ARM_SMMU_GR1, (o))
#define arm_smmu_gr1_write(s, o, v)	\
	arm_smmu_writel((s), ARM_SMMU_GR1, (o), (v))

#define arm_smmu_cb_read(s, n, o)	\
	arm_smmu_readl((s), ARM_SMMU_CB((s), (n)), (o))
#define arm_smmu_cb_write(s, n, o, v)	\
	arm_smmu_writel((s), ARM_SMMU_CB((s), (n)), (o), (v))
#define arm_smmu_cb_readq(s, n, o)	\
	arm_smmu_readq((s), ARM_SMMU_CB((s), (n)), (o))
#define arm_smmu_cb_writeq(s, n, o, v)	\
	arm_smmu_writeq((s), ARM_SMMU_CB((s), (n)), (o), (v))
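/*
 * With the accessors above, the register space is addressed purely by page:
 * page 0 is GR0, page 1 is GR1, and context bank n lives at page
 * (smmu->numpage + n), i.e. the context banks directly follow the global
 * register pages. For example, arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, 0)
 * ends up as a writel_relaxed() to
 * smmu->base + ((smmu->numpage + idx) << smmu->pgshift) + ARM_SMMU_CB_SCTLR.
 */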

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static bool using_legacy_binding, using_generic_binding;

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};

static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
{
	if (pm_runtime_enabled(smmu->dev))
		return pm_runtime_get_sync(smmu->dev);

	return 0;
}

static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
{
	if (pm_runtime_enabled(smmu->dev))
		pm_runtime_put(smmu->dev);
}

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}

static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", 0)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}

static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;

static int arm_smmu_register_legacy_master(struct device *dev,
					    struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err;

	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}

static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
				int sync, int status)
{
	unsigned int spin_cnt, delay;
	u32 reg;

	arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL);
	for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
		for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
			reg = arm_smmu_readl(smmu, page, status);
			if (!(reg & sTLBGSTATUS_GSACTIVE))
				return;
			cpu_relax();
		}
		udelay(delay);
	}
	dev_err_ratelimited(smmu->dev,
			    "TLB sync timed out -- SMMU may be deadlocked\n");
}
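/*
 * Note the backoff strategy above: poke the sync register (with QCOM_DUMMY_VAL
 * as the don't-care payload), then busy-poll the status register TLB_SPIN_COUNT
 * times between udelay()s that double on each pass, giving up once the delay
 * reaches TLB_LOOP_TIMEOUT.
 */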

static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
{
	unsigned long flags;

	spin_lock_irqsave(&smmu->global_sync_lock, flags);
	__arm_smmu_tlb_sync(smmu, ARM_SMMU_GR0, ARM_SMMU_GR0_sTLBGSYNC,
			    ARM_SMMU_GR0_sTLBGSTATUS);
	spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
}

static void arm_smmu_tlb_sync_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	unsigned long flags;

	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
	__arm_smmu_tlb_sync(smmu, ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx),
			    ARM_SMMU_CB_TLBSYNC, ARM_SMMU_CB_TLBSTATUS);
	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
}

static void arm_smmu_tlb_sync_vmid(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;

	arm_smmu_tlb_sync_global(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context_s1(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	/*
	 * The TLBI write may be relaxed, so ensure that PTEs cleared by the
	 * current CPU are visible beforehand.
	 */
	wmb();
	arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx,
			  ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid);
	arm_smmu_tlb_sync_context(cookie);
}

static void arm_smmu_tlb_inv_context_s2(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	/* See above */
	wmb();
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
	arm_smmu_tlb_sync_global(smmu);
}

static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size,
				      size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	int reg, idx = cfg->cbndx;

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	reg = leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

	if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
		iova = (iova >> 12) << 12;
		iova |= cfg->asid;
		do {
			arm_smmu_cb_write(smmu, idx, reg, iova);
			iova += granule;
		} while (size -= granule);
	} else {
		iova >>= 12;
		iova |= (u64)cfg->asid << 48;
		do {
			arm_smmu_cb_writeq(smmu, idx, reg, iova);
			iova += granule >> 12;
		} while (size -= granule);
	}
}
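/*
 * For the stage-1 invalidation above, non-AArch64 contexts use 32-bit TLBIVA
 * writes with the ASID packed into the low bits of the page-aligned address,
 * whereas AArch64 contexts use the 64-bit encoding: the address shifted down
 * by 12 with the ASID placed in bits [63:48].
 */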

static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
				      size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	int reg, idx = smmu_domain->cfg.cbndx;

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	reg = leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L : ARM_SMMU_CB_S2_TLBIIPAS2;
	iova >>= 12;
	do {
		if (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64)
			arm_smmu_cb_writeq(smmu, idx, reg, iova);
		else
			arm_smmu_cb_write(smmu, idx, reg, iova);
		iova += granule >> 12;
	} while (size -= granule);
}

/*
 * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
 * almost negligible, but the benefit of getting the first one in as far ahead
 * of the sync as possible is significant, hence we don't just make this a
 * no-op and set .tlb_sync to arm_smmu_inv_context_s2() as you might think.
 */
static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
					 size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
}

static const struct iommu_gather_ops arm_smmu_s1_tlb_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s1,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_s1,
	.tlb_sync	= arm_smmu_tlb_sync_context,
};

static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v2 = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_s2,
	.tlb_sync	= arm_smmu_tlb_sync_context,
};

static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v1 = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
	.tlb_add_flush	= arm_smmu_tlb_inv_vmid_nosync,
	.tlb_sync	= arm_smmu_tlb_sync_vmid,
};
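/*
 * Of the three op tables above, stage-1 and SMMUv2 stage-2 domains can
 * invalidate by VA/IPA and sync on their own context bank, while SMMUv1
 * stage-2 domains only have TLBIVMID to work with, so range invalidations
 * collapse into a per-VMID invalidation paired with a global sync.
 */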
621
Will Deacon45ae7cf2013-06-24 18:31:25 +0100622static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
623{
Vivek Gautambc580b52019-04-22 12:40:36 +0530624 u32 fsr, fsynr, cbfrsynra;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100625 unsigned long iova;
626 struct iommu_domain *domain = dev;
Joerg Roedel1d672632015-03-26 13:43:10 +0100627 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +0100628 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy19713fd2019-08-15 19:37:30 +0100629 int idx = smmu_domain->cfg.cbndx;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100630
Robin Murphy19713fd2019-08-15 19:37:30 +0100631 fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
Will Deacon45ae7cf2013-06-24 18:31:25 +0100632 if (!(fsr & FSR_FAULT))
633 return IRQ_NONE;
634
Robin Murphy19713fd2019-08-15 19:37:30 +0100635 fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0);
636 iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);
637 cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));
Will Deacon45ae7cf2013-06-24 18:31:25 +0100638
Will Deacon3714ce1d2016-08-05 19:49:45 +0100639 dev_err_ratelimited(smmu->dev,
Vivek Gautambc580b52019-04-22 12:40:36 +0530640 "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
Robin Murphy19713fd2019-08-15 19:37:30 +0100641 fsr, iova, fsynr, cbfrsynra, idx);
Will Deacon3714ce1d2016-08-05 19:49:45 +0100642
Robin Murphy19713fd2019-08-15 19:37:30 +0100643 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
Will Deacon3714ce1d2016-08-05 19:49:45 +0100644 return IRQ_HANDLED;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100645}
646
647static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
648{
649 u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
650 struct arm_smmu_device *smmu = dev;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100651
Robin Murphy00320ce2019-08-15 19:37:31 +0100652 gfsr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
653 gfsynr0 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR0);
654 gfsynr1 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR1);
655 gfsynr2 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR2);
Will Deacon45ae7cf2013-06-24 18:31:25 +0100656
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000657 if (!gfsr)
658 return IRQ_NONE;
659
Will Deacon45ae7cf2013-06-24 18:31:25 +0100660 dev_err_ratelimited(smmu->dev,
661 "Unexpected global fault, this could be serious\n");
662 dev_err_ratelimited(smmu->dev,
663 "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
664 gfsr, gfsynr0, gfsynr1, gfsynr2);
665
Robin Murphy00320ce2019-08-15 19:37:31 +0100666 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, gfsr);
Will Deaconadaba322013-07-31 19:21:26 +0100667 return IRQ_HANDLED;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100668}
669
Will Deacon518f7132014-11-14 17:17:54 +0000670static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
671 struct io_pgtable_cfg *pgtbl_cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100672{
Will Deacon44680ee2014-06-25 11:29:12 +0100673 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Robin Murphy90df3732017-08-08 14:56:14 +0100674 struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
675 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
676
677 cb->cfg = cfg;
678
Robin Murphy620565a2019-08-15 19:37:25 +0100679 /* TCR */
Robin Murphy90df3732017-08-08 14:56:14 +0100680 if (stage1) {
681 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
682 cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr;
683 } else {
684 cb->tcr[0] = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
685 cb->tcr[1] = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
Robin Murphy620565a2019-08-15 19:37:25 +0100686 cb->tcr[1] |= FIELD_PREP(TCR2_SEP, TCR2_SEP_UPSTREAM);
Robin Murphy90df3732017-08-08 14:56:14 +0100687 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
Robin Murphy620565a2019-08-15 19:37:25 +0100688 cb->tcr[1] |= TCR2_AS;
Robin Murphy90df3732017-08-08 14:56:14 +0100689 }
690 } else {
691 cb->tcr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
692 }
693
694 /* TTBRs */
695 if (stage1) {
696 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
697 cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
698 cb->ttbr[1] = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
699 } else {
700 cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
Robin Murphy620565a2019-08-15 19:37:25 +0100701 cb->ttbr[0] |= FIELD_PREP(TTBRn_ASID, cfg->asid);
Robin Murphy90df3732017-08-08 14:56:14 +0100702 cb->ttbr[1] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
Robin Murphy620565a2019-08-15 19:37:25 +0100703 cb->ttbr[1] |= FIELD_PREP(TTBRn_ASID, cfg->asid);
Robin Murphy90df3732017-08-08 14:56:14 +0100704 }
705 } else {
706 cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
707 }
708
709 /* MAIRs (stage-1 only) */
710 if (stage1) {
711 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
712 cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr;
713 cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr;
714 } else {
715 cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
716 cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
717 }
718 }
719}
720
721static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
722{
723 u32 reg;
724 bool stage1;
725 struct arm_smmu_cb *cb = &smmu->cbs[idx];
726 struct arm_smmu_cfg *cfg = cb->cfg;
Robin Murphy90df3732017-08-08 14:56:14 +0100727
728 /* Unassigned context banks only need disabling */
729 if (!cfg) {
Robin Murphy19713fd2019-08-15 19:37:30 +0100730 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, 0);
Robin Murphy90df3732017-08-08 14:56:14 +0100731 return;
732 }
733
Will Deacon44680ee2014-06-25 11:29:12 +0100734 stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100735
Robin Murphy90df3732017-08-08 14:56:14 +0100736 /* CBA2R */
Will Deacon4a1c93c2015-03-04 12:21:03 +0000737 if (smmu->version > ARM_SMMU_V1) {
Robin Murphy7602b872016-04-28 17:12:09 +0100738 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
Robin Murphy5114e962019-08-15 19:37:24 +0100739 reg = CBA2R_VA64;
Robin Murphy7602b872016-04-28 17:12:09 +0100740 else
Robin Murphy5114e962019-08-15 19:37:24 +0100741 reg = 0;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -0800742 /* 16-bit VMIDs live in CBA2R */
743 if (smmu->features & ARM_SMMU_FEAT_VMID16)
Robin Murphy5114e962019-08-15 19:37:24 +0100744 reg |= FIELD_PREP(CBA2R_VMID16, cfg->vmid);
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -0800745
Robin Murphyaadbf212019-08-15 19:37:29 +0100746 arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBA2R(idx), reg);
Will Deacon4a1c93c2015-03-04 12:21:03 +0000747 }
748
Will Deacon45ae7cf2013-06-24 18:31:25 +0100749 /* CBAR */
Robin Murphy5114e962019-08-15 19:37:24 +0100750 reg = FIELD_PREP(CBAR_TYPE, cfg->cbar);
Robin Murphyb7862e32016-04-13 18:13:03 +0100751 if (smmu->version < ARM_SMMU_V2)
Robin Murphy5114e962019-08-15 19:37:24 +0100752 reg |= FIELD_PREP(CBAR_IRPTNDX, cfg->irptndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +0100753
Will Deacon57ca90f2014-02-06 14:59:05 +0000754 /*
755 * Use the weakest shareability/memory types, so they are
756 * overridden by the ttbcr/pte.
757 */
758 if (stage1) {
Robin Murphy5114e962019-08-15 19:37:24 +0100759 reg |= FIELD_PREP(CBAR_S1_BPSHCFG, CBAR_S1_BPSHCFG_NSH) |
760 FIELD_PREP(CBAR_S1_MEMATTR, CBAR_S1_MEMATTR_WB);
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -0800761 } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
762 /* 8-bit VMIDs live in CBAR */
Robin Murphy5114e962019-08-15 19:37:24 +0100763 reg |= FIELD_PREP(CBAR_VMID, cfg->vmid);
Will Deacon57ca90f2014-02-06 14:59:05 +0000764 }
Robin Murphyaadbf212019-08-15 19:37:29 +0100765 arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(idx), reg);
Will Deacon45ae7cf2013-06-24 18:31:25 +0100766
Sunil Goutham125458a2017-03-28 16:11:12 +0530767 /*
Robin Murphy620565a2019-08-15 19:37:25 +0100768 * TCR
Sunil Goutham125458a2017-03-28 16:11:12 +0530769 * We must write this before the TTBRs, since it determines the
770 * access behaviour of some fields (in particular, ASID[15:8]).
771 */
Robin Murphy90df3732017-08-08 14:56:14 +0100772 if (stage1 && smmu->version > ARM_SMMU_V1)
Robin Murphy19713fd2019-08-15 19:37:30 +0100773 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR2, cb->tcr[1]);
774 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR, cb->tcr[0]);
Will Deacon45ae7cf2013-06-24 18:31:25 +0100775
Will Deacon45ae7cf2013-06-24 18:31:25 +0100776 /* TTBRs */
Robin Murphy90df3732017-08-08 14:56:14 +0100777 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
Robin Murphy19713fd2019-08-15 19:37:30 +0100778 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_CONTEXTIDR, cfg->asid);
779 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
780 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR1, cb->ttbr[1]);
Will Deacon45ae7cf2013-06-24 18:31:25 +0100781 } else {
Robin Murphy19713fd2019-08-15 19:37:30 +0100782 arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
Robin Murphy90df3732017-08-08 14:56:14 +0100783 if (stage1)
Robin Murphy19713fd2019-08-15 19:37:30 +0100784 arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR1,
785 cb->ttbr[1]);
Will Deacon45ae7cf2013-06-24 18:31:25 +0100786 }
787
Will Deacon518f7132014-11-14 17:17:54 +0000788 /* MAIRs (stage-1 only) */
Will Deacon45ae7cf2013-06-24 18:31:25 +0100789 if (stage1) {
Robin Murphy19713fd2019-08-15 19:37:30 +0100790 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR0, cb->mair[0]);
791 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR1, cb->mair[1]);
Will Deacon45ae7cf2013-06-24 18:31:25 +0100792 }
793
Will Deacon45ae7cf2013-06-24 18:31:25 +0100794 /* SCTLR */
Robin Murphy60705292016-08-11 17:44:06 +0100795 reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100796 if (stage1)
797 reg |= SCTLR_S1_ASIDPNE;
Robin Murphy90df3732017-08-08 14:56:14 +0100798 if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
799 reg |= SCTLR_E;
800
Robin Murphy19713fd2019-08-15 19:37:30 +0100801 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
Will Deacon45ae7cf2013-06-24 18:31:25 +0100802}
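/*
 * The split between the two functions above keeps configuration and
 * programming separate: arm_smmu_init_context_bank() captures the io-pgtable
 * results into the shadow struct arm_smmu_cb, and arm_smmu_write_context_bank()
 * replays that shadow state (or simply clears SCTLR for unassigned banks) into
 * the hardware, so a bank can be reprogrammed without recomputing its
 * configuration.
 */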

static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
		smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 * S1               N                S1
	 * S1               S1+S2            S1
	 * S1               S2               S2
	 * S1               S1               S1
	 * N                N                N
	 * N                S1+S2            S2
	 * N                S2               S2
	 * N                S1               S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
	    !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
	    (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		} else {
			fmt = ARM_V7S;
			ias = min(ias, 32UL);
			oas = min(oas, 32UL);
		}
		smmu_domain->tlb_ops = &arm_smmu_s1_tlb_ops;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		if (smmu->version == ARM_SMMU_V2)
			smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v2;
		else
			smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v1;
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}
	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (ret < 0)
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
		cfg->vmid = cfg->cbndx + 1 + smmu->cavium_id_base;
	else
		cfg->asid = cfg->cbndx + smmu->cavium_id_base;

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.coherent_walk	= smmu->features & ARM_SMMU_FEAT_COHERENT_WALK,
		.tlb		= smmu_domain->tlb_ops,
		.iommu_dev	= smmu->dev,
	};

	if (smmu_domain->non_strict)
		pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1UL << ias) - 1;
	domain->geometry.force_aperture = true;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
			       IRQF_SHARED, "arm-smmu-context-fault", domain);
	if (ret < 0) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	int ret, irq;

	if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
		return;

	ret = arm_smmu_rpm_get(smmu);
	if (ret < 0)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	smmu->cbs[cfg->cbndx].cfg = NULL;
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);

	arm_smmu_rpm_put(smmu);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED &&
	    type != IOMMU_DOMAIN_DMA &&
	    type != IOMMU_DOMAIN_IDENTITY)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
	    iommu_get_dma_cookie(&smmu_domain->domain))) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->cb_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_smr *smr = smmu->smrs + idx;
	u32 reg = FIELD_PREP(SMR_ID, smr->id) | FIELD_PREP(SMR_MASK, smr->mask);

	if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
		reg |= SMR_VALID;
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(idx), reg);
}

static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
	u32 reg = FIELD_PREP(S2CR_TYPE, s2cr->type) |
		  FIELD_PREP(S2CR_CBNDX, s2cr->cbndx) |
		  FIELD_PREP(S2CR_PRIVCFG, s2cr->privcfg);

	if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
	    smmu->smrs[idx].valid)
		reg |= S2CR_EXIDVALID;
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
}

static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
{
	arm_smmu_write_s2cr(smmu, idx);
	if (smmu->smrs)
		arm_smmu_write_smr(smmu, idx);
}

/*
 * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
 * should be called after sCR0 is written.
 */
static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
{
	u32 smr;

	if (!smmu->smrs)
		return;

	/*
	 * SMR.ID bits may not be preserved if the corresponding MASK
	 * bits are set, so check each one separately. We can reject
	 * masters later if they try to claim IDs outside these masks.
	 */
	smr = FIELD_PREP(SMR_ID, smmu->streamid_mask);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(0), smr);
	smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(0));
	smmu->streamid_mask = FIELD_GET(SMR_ID, smr);

	smr = FIELD_PREP(SMR_MASK, smmu->streamid_mask);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(0), smr);
	smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(0));
	smmu->smr_mask_mask = FIELD_GET(SMR_MASK, smr);
}
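/*
 * The probe above relies on write-then-read-back: SMR(0) is programmed with
 * the candidate streamid_mask in the ID field and read back to see which bits
 * actually stick, and the same trick is then applied to the MASK field,
 * leaving smmu->streamid_mask and smmu->smr_mask_mask holding the usable
 * widths.
 */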

static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
{
	struct arm_smmu_smr *smrs = smmu->smrs;
	int i, free_idx = -ENOSPC;

	/* Stream indexing is blissfully easy */
	if (!smrs)
		return id;

	/* Validating SMRs is... less so */
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		if (!smrs[i].valid) {
			/*
			 * Note the first free entry we come across, which
			 * we'll claim in the end if nothing else matches.
			 */
			if (free_idx < 0)
				free_idx = i;
			continue;
		}
		/*
		 * If the new entry is _entirely_ matched by an existing entry,
		 * then reuse that, with the guarantee that there also cannot
		 * be any subsequent conflicting entries. In normal use we'd
		 * expect simply identical entries for this case, but there's
		 * no harm in accommodating the generalisation.
		 */
		if ((mask & smrs[i].mask) == mask &&
		    !((id ^ smrs[i].id) & ~smrs[i].mask))
			return i;
		/*
		 * If the new entry has any other overlap with an existing one,
		 * though, then there always exists at least one stream ID
		 * which would cause a conflict, and we can't allow that risk.
		 */
		if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
			return -EINVAL;
	}

	return free_idx;
}
1163
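/*
 * Drop one reference on a stream map entry. Returns true when this was the
 * last user, meaning the entry has been reset and the caller should write
 * the change out to the hardware.
 */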
1164static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
1165{
1166 if (--smmu->s2crs[idx].count)
1167 return false;
1168
1169 smmu->s2crs[idx] = s2cr_init_val;
1170 if (smmu->smrs)
1171 smmu->smrs[idx].valid = false;
1172
1173 return true;
1174}
1175
1176static int arm_smmu_master_alloc_smes(struct device *dev)
1177{
Joerg Roedel9b468f72018-11-29 14:01:00 +01001178 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001179 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy588888a2016-09-12 17:13:54 +01001180 struct arm_smmu_device *smmu = cfg->smmu;
1181 struct arm_smmu_smr *smrs = smmu->smrs;
1182 struct iommu_group *group;
1183 int i, idx, ret;
1184
1185 mutex_lock(&smmu->stream_map_mutex);
1186 /* Figure out a viable stream map entry allocation */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001187 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy0caf5f42019-08-15 19:37:23 +01001188 u16 sid = FIELD_GET(SMR_ID, fwspec->ids[i]);
1189 u16 mask = FIELD_GET(SMR_MASK, fwspec->ids[i]);
Robin Murphy021bb842016-09-14 15:26:46 +01001190
Robin Murphy588888a2016-09-12 17:13:54 +01001191 if (idx != INVALID_SMENDX) {
1192 ret = -EEXIST;
1193 goto out_err;
1194 }
1195
Robin Murphy021bb842016-09-14 15:26:46 +01001196 ret = arm_smmu_find_sme(smmu, sid, mask);
Robin Murphy588888a2016-09-12 17:13:54 +01001197 if (ret < 0)
1198 goto out_err;
1199
1200 idx = ret;
1201 if (smrs && smmu->s2crs[idx].count == 0) {
Robin Murphy021bb842016-09-14 15:26:46 +01001202 smrs[idx].id = sid;
1203 smrs[idx].mask = mask;
Robin Murphy588888a2016-09-12 17:13:54 +01001204 smrs[idx].valid = true;
1205 }
1206 smmu->s2crs[idx].count++;
1207 cfg->smendx[i] = (s16)idx;
1208 }
1209
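	/*
	 * Nothing has been written to the hardware yet, so resolve the IOMMU
	 * group first; if that fails we can still back out via out_err.
	 */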
1210 group = iommu_group_get_for_dev(dev);
1211 if (!group)
1212 group = ERR_PTR(-ENOMEM);
1213 if (IS_ERR(group)) {
1214 ret = PTR_ERR(group);
1215 goto out_err;
1216 }
1217 iommu_group_put(group);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001218
Will Deacon45ae7cf2013-06-24 18:31:25 +01001219 /* It worked! Now, poke the actual hardware */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001220 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy588888a2016-09-12 17:13:54 +01001221 arm_smmu_write_sme(smmu, idx);
1222 smmu->s2crs[idx].group = group;
1223 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001224
Robin Murphy588888a2016-09-12 17:13:54 +01001225 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001226 return 0;
1227
Robin Murphy588888a2016-09-12 17:13:54 +01001228out_err:
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001229 while (i--) {
Robin Murphy588888a2016-09-12 17:13:54 +01001230 arm_smmu_free_sme(smmu, cfg->smendx[i]);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001231 cfg->smendx[i] = INVALID_SMENDX;
1232 }
Robin Murphy588888a2016-09-12 17:13:54 +01001233 mutex_unlock(&smmu->stream_map_mutex);
1234 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001235}
1236
Robin Murphyadfec2e2016-09-12 17:13:55 +01001237static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001238{
Robin Murphyadfec2e2016-09-12 17:13:55 +01001239 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
1240 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphyd3097e32016-09-12 17:13:53 +01001241 int i, idx;
Will Deacon43b412b2014-07-15 11:22:24 +01001242
Robin Murphy588888a2016-09-12 17:13:54 +01001243 mutex_lock(&smmu->stream_map_mutex);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001244 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy588888a2016-09-12 17:13:54 +01001245 if (arm_smmu_free_sme(smmu, idx))
1246 arm_smmu_write_sme(smmu, idx);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001247 cfg->smendx[i] = INVALID_SMENDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001248 }
Robin Murphy588888a2016-09-12 17:13:54 +01001249 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001250}
1251
Will Deacon45ae7cf2013-06-24 18:31:25 +01001252static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Robin Murphyadfec2e2016-09-12 17:13:55 +01001253 struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001254{
Will Deacon44680ee2014-06-25 11:29:12 +01001255 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001256 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001257 u8 cbndx = smmu_domain->cfg.cbndx;
Will Deacon61bc6712017-01-06 16:56:03 +00001258 enum arm_smmu_s2cr_type type;
Robin Murphy588888a2016-09-12 17:13:54 +01001259 int i, idx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001260
Will Deacon61bc6712017-01-06 16:56:03 +00001261 if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
1262 type = S2CR_TYPE_BYPASS;
1263 else
1264 type = S2CR_TYPE_TRANS;
1265
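	/*
	 * Route each of the master's stream map entries to the domain's
	 * context bank, skipping any that are already configured correctly.
	 */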
Robin Murphyadfec2e2016-09-12 17:13:55 +01001266 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy8e8b2032016-09-12 17:13:50 +01001267 if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
Robin Murphy588888a2016-09-12 17:13:54 +01001268 continue;
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001269
Robin Murphy8e8b2032016-09-12 17:13:50 +01001270 s2cr[idx].type = type;
Sricharan Re1989802017-01-06 18:58:15 +05301271 s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001272 s2cr[idx].cbndx = cbndx;
1273 arm_smmu_write_s2cr(smmu, idx);
Will Deacon43b412b2014-07-15 11:22:24 +01001274 }
Robin Murphy8e8b2032016-09-12 17:13:50 +01001275 return 0;
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001276}
1277
Will Deacon45ae7cf2013-06-24 18:31:25 +01001278static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1279{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001280 int ret;
Joerg Roedel9b468f72018-11-29 14:01:00 +01001281 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001282 struct arm_smmu_device *smmu;
Joerg Roedel1d672632015-03-26 13:43:10 +01001283 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001284
Robin Murphyadfec2e2016-09-12 17:13:55 +01001285 if (!fwspec || fwspec->ops != &arm_smmu_ops) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001286 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
1287 return -ENXIO;
1288 }
1289
Robin Murphyfba4f8e2016-10-17 12:06:21 +01001290 /*
1291 * FIXME: The arch/arm DMA API code tries to attach devices to its own
1292 * domains between of_xlate() and add_device() - we have no way to cope
1293 * with that, so until ARM gets converted to rely on groups and default
1294 * domains, just say no (but more politely than by dereferencing NULL).
1295 * This should be at least a WARN_ON once that's sorted.
1296 */
1297 if (!fwspec->iommu_priv)
1298 return -ENODEV;
1299
Robin Murphyadfec2e2016-09-12 17:13:55 +01001300 smmu = fwspec_smmu(fwspec);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301301
1302 ret = arm_smmu_rpm_get(smmu);
1303 if (ret < 0)
1304 return ret;
1305
Will Deacon518f7132014-11-14 17:17:54 +00001306 /* Ensure that the domain is finalised */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001307 ret = arm_smmu_init_domain_context(domain, smmu);
Arnd Bergmann287980e2016-05-27 23:23:25 +02001308 if (ret < 0)
Sricharan Rd4a44f02018-12-04 11:52:10 +05301309 goto rpm_put;
Will Deacon518f7132014-11-14 17:17:54 +00001310
Will Deacon45ae7cf2013-06-24 18:31:25 +01001311 /*
Will Deacon44680ee2014-06-25 11:29:12 +01001312 * Sanity check the domain. We don't support domains across
1313 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01001314 */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001315 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001316 dev_err(dev,
1317 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Robin Murphyadfec2e2016-09-12 17:13:55 +01001318 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
Sricharan Rd4a44f02018-12-04 11:52:10 +05301319 ret = -EINVAL;
1320 goto rpm_put;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001321 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001322
1323 /* Looks ok, so add the device to the domain */
Sricharan Rd4a44f02018-12-04 11:52:10 +05301324 ret = arm_smmu_domain_add_master(smmu_domain, fwspec);
1325
1326rpm_put:
1327 arm_smmu_rpm_put(smmu);
1328 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001329}
1330
Will Deacon45ae7cf2013-06-24 18:31:25 +01001331static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00001332 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001333{
Robin Murphy523d7422017-06-22 16:53:56 +01001334 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301335 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1336 int ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001337
Will Deacon518f7132014-11-14 17:17:54 +00001338 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001339 return -ENODEV;
1340
Sricharan Rd4a44f02018-12-04 11:52:10 +05301341 arm_smmu_rpm_get(smmu);
1342 ret = ops->map(ops, iova, paddr, size, prot);
1343 arm_smmu_rpm_put(smmu);
1344
1345 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001346}
1347
1348static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
1349 size_t size)
1350{
Robin Murphy523d7422017-06-22 16:53:56 +01001351 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301352 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1353 size_t ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001354
Will Deacon518f7132014-11-14 17:17:54 +00001355 if (!ops)
1356 return 0;
1357
Sricharan Rd4a44f02018-12-04 11:52:10 +05301358 arm_smmu_rpm_get(smmu);
1359 ret = ops->unmap(ops, iova, size);
1360 arm_smmu_rpm_put(smmu);
1361
1362 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001363}
1364
Robin Murphy44f68762018-09-20 17:10:27 +01001365static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
1366{
1367 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301368 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy44f68762018-09-20 17:10:27 +01001369
Sricharan Rd4a44f02018-12-04 11:52:10 +05301370 if (smmu_domain->tlb_ops) {
1371 arm_smmu_rpm_get(smmu);
Robin Murphy44f68762018-09-20 17:10:27 +01001372 smmu_domain->tlb_ops->tlb_flush_all(smmu_domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301373 arm_smmu_rpm_put(smmu);
1374 }
Robin Murphy44f68762018-09-20 17:10:27 +01001375}
1376
Robin Murphy32b12442017-09-28 15:55:01 +01001377static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
1378{
1379 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301380 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy32b12442017-09-28 15:55:01 +01001381
Sricharan Rd4a44f02018-12-04 11:52:10 +05301382 if (smmu_domain->tlb_ops) {
1383 arm_smmu_rpm_get(smmu);
Robin Murphy32b12442017-09-28 15:55:01 +01001384 smmu_domain->tlb_ops->tlb_sync(smmu_domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301385 arm_smmu_rpm_put(smmu);
1386 }
Robin Murphy32b12442017-09-28 15:55:01 +01001387}
1388
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001389static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
1390 dma_addr_t iova)
1391{
Joerg Roedel1d672632015-03-26 13:43:10 +01001392 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001393 struct arm_smmu_device *smmu = smmu_domain->smmu;
1394 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1395 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1396 struct device *dev = smmu->dev;
Robin Murphy19713fd2019-08-15 19:37:30 +01001397 void __iomem *reg;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001398 u32 tmp;
1399 u64 phys;
Robin Murphy523d7422017-06-22 16:53:56 +01001400 unsigned long va, flags;
Robin Murphy19713fd2019-08-15 19:37:30 +01001401 int ret, idx = cfg->cbndx;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301402
1403 ret = arm_smmu_rpm_get(smmu);
1404 if (ret < 0)
1405 return 0;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001406
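	/*
	 * Use the hardware translation operation: write the page-aligned VA
	 * to ATS1PR, poll ATSR until the walk completes, then read the
	 * resulting physical address (or fault indication) from PAR.
	 */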
Robin Murphy523d7422017-06-22 16:53:56 +01001407 spin_lock_irqsave(&smmu_domain->cb_lock, flags);
Robin Murphy661d9622015-05-27 17:09:34 +01001408 va = iova & ~0xfffUL;
Robin Murphy61005762019-08-15 19:37:28 +01001409 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
Robin Murphy19713fd2019-08-15 19:37:30 +01001410 arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
Robin Murphy61005762019-08-15 19:37:28 +01001411 else
Robin Murphy19713fd2019-08-15 19:37:30 +01001412 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001413
Robin Murphy19713fd2019-08-15 19:37:30 +01001414 reg = arm_smmu_page(smmu, ARM_SMMU_CB(smmu, idx)) + ARM_SMMU_CB_ATSR;
1415 if (readl_poll_timeout_atomic(reg, tmp, !(tmp & ATSR_ACTIVE), 5, 50)) {
Robin Murphy523d7422017-06-22 16:53:56 +01001416 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001417 dev_err(dev,
Fabio Estevam077124c2015-08-18 17:12:24 +01001418 "iova to phys timed out on %pad. Falling back to software table walk.\n",
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001419 &iova);
		arm_smmu_rpm_put(smmu);
1420 return ops->iova_to_phys(ops, iova);
1421 }
1422
Robin Murphy19713fd2019-08-15 19:37:30 +01001423 phys = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_PAR);
Robin Murphy523d7422017-06-22 16:53:56 +01001424 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001425 if (phys & CB_PAR_F) {
1426 dev_err(dev, "translation fault!\n");
1427 dev_err(dev, "PAR = 0x%llx\n", phys);
		arm_smmu_rpm_put(smmu);
1428 return 0;
1429 }
1430
Sricharan Rd4a44f02018-12-04 11:52:10 +05301431 arm_smmu_rpm_put(smmu);
1432
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001433 return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
1434}
1435
Will Deacon45ae7cf2013-06-24 18:31:25 +01001436static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001437 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001438{
Joerg Roedel1d672632015-03-26 13:43:10 +01001439 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Robin Murphy523d7422017-06-22 16:53:56 +01001440 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001441
Sunil Gouthambdf95922017-04-25 15:27:52 +05301442 if (domain->type == IOMMU_DOMAIN_IDENTITY)
1443 return iova;
1444
Will Deacon518f7132014-11-14 17:17:54 +00001445 if (!ops)
Will Deacona44a97912013-11-07 18:47:50 +00001446 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001447
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001448 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
Robin Murphy523d7422017-06-22 16:53:56 +01001449 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
1450 return arm_smmu_iova_to_phys_hard(domain, iova);
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001451
Robin Murphy523d7422017-06-22 16:53:56 +01001452 return ops->iova_to_phys(ops, iova);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001453}
1454
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001455static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001456{
Will Deacond0948942014-06-24 17:30:10 +01001457 switch (cap) {
1458 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001459 /*
1460 * Return true here as the SMMU can always send out coherent
1461 * requests.
1462 */
1463 return true;
Antonios Motakis0029a8d2014-10-13 14:06:18 +01001464 case IOMMU_CAP_NOEXEC:
1465 return true;
Will Deacond0948942014-06-24 17:30:10 +01001466 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001467 return false;
Will Deacond0948942014-06-24 17:30:10 +01001468 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001469}
Will Deacon45ae7cf2013-06-24 18:31:25 +01001470
Suzuki K Poulose92ce7e82019-06-14 18:54:00 +01001471static int arm_smmu_match_node(struct device *dev, const void *data)
Robin Murphy021bb842016-09-14 15:26:46 +01001472{
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001473 return dev->fwnode == data;
Robin Murphy021bb842016-09-14 15:26:46 +01001474}
1475
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001476static
1477struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
Robin Murphy021bb842016-09-14 15:26:46 +01001478{
1479 struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001480 fwnode, arm_smmu_match_node);
Robin Murphy021bb842016-09-14 15:26:46 +01001481 put_device(dev);
1482 return dev ? dev_get_drvdata(dev) : NULL;
1483}
1484
Will Deacon03edb222015-01-19 14:27:33 +00001485static int arm_smmu_add_device(struct device *dev)
1486{
Robin Murphyadfec2e2016-09-12 17:13:55 +01001487 struct arm_smmu_device *smmu;
Robin Murphyf80cd882016-09-14 15:21:39 +01001488 struct arm_smmu_master_cfg *cfg;
Joerg Roedel9b468f72018-11-29 14:01:00 +01001489 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphyf80cd882016-09-14 15:21:39 +01001490 int i, ret;
1491
Robin Murphy021bb842016-09-14 15:26:46 +01001492 if (using_legacy_binding) {
1493 ret = arm_smmu_register_legacy_master(dev, &smmu);
Artem Savkova7990c62017-08-08 12:26:02 +02001494
1495 /*
1496 * If dev->iommu_fwspec is initially NULL, arm_smmu_register_legacy_master()
1497 * will allocate/initialise a new one. Thus we need to update fwspec for
1498 * later use.
1499 */
Joerg Roedel9b468f72018-11-29 14:01:00 +01001500 fwspec = dev_iommu_fwspec_get(dev);
Robin Murphy021bb842016-09-14 15:26:46 +01001501 if (ret)
1502 goto out_free;
Robin Murphy3c117b52016-11-02 17:31:32 +00001503 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001504 smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
Robin Murphy021bb842016-09-14 15:26:46 +01001505 } else {
1506 return -ENODEV;
1507 }
Robin Murphyf80cd882016-09-14 15:21:39 +01001508
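	/*
	 * Reject any stream ID or SMR mask wider than what the hardware was
	 * probed to support.
	 */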
1509 ret = -EINVAL;
Robin Murphyadfec2e2016-09-12 17:13:55 +01001510 for (i = 0; i < fwspec->num_ids; i++) {
Robin Murphy0caf5f42019-08-15 19:37:23 +01001511 u16 sid = FIELD_GET(SMR_ID, fwspec->ids[i]);
1512 u16 mask = FIELD_GET(SMR_MASK, fwspec->ids[i]);
Robin Murphyf80cd882016-09-14 15:21:39 +01001513
Robin Murphyadfec2e2016-09-12 17:13:55 +01001514 if (sid & ~smmu->streamid_mask) {
Robin Murphyf80cd882016-09-14 15:21:39 +01001515 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
Robin Murphy021bb842016-09-14 15:26:46 +01001516 sid, smmu->streamid_mask);
1517 goto out_free;
1518 }
1519 if (mask & ~smmu->smr_mask_mask) {
1520 dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
Peng Fan6323f472017-04-21 17:03:36 +08001521 mask, smmu->smr_mask_mask);
Robin Murphyf80cd882016-09-14 15:21:39 +01001522 goto out_free;
1523 }
Robin Murphyf80cd882016-09-14 15:21:39 +01001524 }
Will Deacon03edb222015-01-19 14:27:33 +00001525
Robin Murphyadfec2e2016-09-12 17:13:55 +01001526 ret = -ENOMEM;
1527 cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
1528 GFP_KERNEL);
1529 if (!cfg)
1530 goto out_free;
1531
1532 cfg->smmu = smmu;
1533 fwspec->iommu_priv = cfg;
1534 while (i--)
1535 cfg->smendx[i] = INVALID_SMENDX;
1536
Sricharan Rd4a44f02018-12-04 11:52:10 +05301537 ret = arm_smmu_rpm_get(smmu);
1538 if (ret < 0)
1539 goto out_cfg_free;
1540
Robin Murphy588888a2016-09-12 17:13:54 +01001541 ret = arm_smmu_master_alloc_smes(dev);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301542 arm_smmu_rpm_put(smmu);
1543
Robin Murphyadfec2e2016-09-12 17:13:55 +01001544 if (ret)
Vivek Gautamc54451a2017-07-06 15:07:00 +05301545 goto out_cfg_free;
Robin Murphyadfec2e2016-09-12 17:13:55 +01001546
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001547 iommu_device_link(&smmu->iommu, dev);
1548
Sricharan R655e3642018-12-04 11:52:11 +05301549 device_link_add(dev, smmu->dev,
1550 DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER);
1551
Robin Murphyadfec2e2016-09-12 17:13:55 +01001552 return 0;
Robin Murphyf80cd882016-09-14 15:21:39 +01001553
Vivek Gautamc54451a2017-07-06 15:07:00 +05301554out_cfg_free:
1555 kfree(cfg);
Robin Murphyf80cd882016-09-14 15:21:39 +01001556out_free:
Robin Murphyadfec2e2016-09-12 17:13:55 +01001557 iommu_fwspec_free(dev);
Robin Murphyf80cd882016-09-14 15:21:39 +01001558 return ret;
Will Deacon03edb222015-01-19 14:27:33 +00001559}
1560
Will Deacon45ae7cf2013-06-24 18:31:25 +01001561static void arm_smmu_remove_device(struct device *dev)
1562{
Joerg Roedel9b468f72018-11-29 14:01:00 +01001563 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001564 struct arm_smmu_master_cfg *cfg;
1565 struct arm_smmu_device *smmu;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301566 int ret;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001567
Robin Murphyadfec2e2016-09-12 17:13:55 +01001568 if (!fwspec || fwspec->ops != &arm_smmu_ops)
Robin Murphyf80cd882016-09-14 15:21:39 +01001569 return;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001570
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001571 cfg = fwspec->iommu_priv;
1572 smmu = cfg->smmu;
1573
Sricharan Rd4a44f02018-12-04 11:52:10 +05301574 ret = arm_smmu_rpm_get(smmu);
1575 if (ret < 0)
1576 return;
1577
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001578 iommu_device_unlink(&smmu->iommu, dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001579 arm_smmu_master_free_smes(fwspec);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301580
1581 arm_smmu_rpm_put(smmu);
1582
Antonios Motakis5fc63a72013-10-18 16:08:29 +01001583 iommu_group_remove_device(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001584 kfree(fwspec->iommu_priv);
1585 iommu_fwspec_free(dev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001586}
1587
Joerg Roedelaf659932015-10-21 23:51:41 +02001588static struct iommu_group *arm_smmu_device_group(struct device *dev)
1589{
Joerg Roedel9b468f72018-11-29 14:01:00 +01001590 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001591 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
Robin Murphy588888a2016-09-12 17:13:54 +01001592 struct iommu_group *group = NULL;
1593 int i, idx;
1594
Robin Murphyadfec2e2016-09-12 17:13:55 +01001595 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy588888a2016-09-12 17:13:54 +01001596 if (group && smmu->s2crs[idx].group &&
1597 group != smmu->s2crs[idx].group)
1598 return ERR_PTR(-EINVAL);
1599
1600 group = smmu->s2crs[idx].group;
1601 }
1602
1603 if (group)
Robin Murphye1b44cb2016-11-11 17:59:22 +00001604 return iommu_group_ref_get(group);
Joerg Roedelaf659932015-10-21 23:51:41 +02001605
1606 if (dev_is_pci(dev))
1607 group = pci_device_group(dev);
Nipun Guptaeab03e22018-09-10 19:19:18 +05301608 else if (dev_is_fsl_mc(dev))
1609 group = fsl_mc_device_group(dev);
Joerg Roedelaf659932015-10-21 23:51:41 +02001610 else
1611 group = generic_device_group(dev);
1612
Joerg Roedelaf659932015-10-21 23:51:41 +02001613 return group;
1614}
1615
Will Deaconc752ce42014-06-25 22:46:31 +01001616static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
1617 enum iommu_attr attr, void *data)
1618{
Joerg Roedel1d672632015-03-26 13:43:10 +01001619 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001620
Robin Murphy44f68762018-09-20 17:10:27 +01001621 switch (domain->type) {
1622 case IOMMU_DOMAIN_UNMANAGED:
1623 switch (attr) {
1624 case DOMAIN_ATTR_NESTING:
1625 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
1626 return 0;
1627 default:
1628 return -ENODEV;
1629 }
1630 break;
1631 case IOMMU_DOMAIN_DMA:
1632 switch (attr) {
1633 case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
1634 *(int *)data = smmu_domain->non_strict;
1635 return 0;
1636 default:
1637 return -ENODEV;
1638 }
1639 break;
Will Deaconc752ce42014-06-25 22:46:31 +01001640 default:
Robin Murphy44f68762018-09-20 17:10:27 +01001641 return -EINVAL;
Will Deaconc752ce42014-06-25 22:46:31 +01001642 }
1643}
1644
1645static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
1646 enum iommu_attr attr, void *data)
1647{
Will Deacon518f7132014-11-14 17:17:54 +00001648 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01001649 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001650
Will Deacon518f7132014-11-14 17:17:54 +00001651 mutex_lock(&smmu_domain->init_mutex);
1652
Robin Murphy44f68762018-09-20 17:10:27 +01001653 switch (domain->type) {
1654 case IOMMU_DOMAIN_UNMANAGED:
1655 switch (attr) {
1656 case DOMAIN_ATTR_NESTING:
1657 if (smmu_domain->smmu) {
1658 ret = -EPERM;
1659 goto out_unlock;
1660 }
1661
1662 if (*(int *)data)
1663 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1664 else
1665 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1666 break;
1667 default:
1668 ret = -ENODEV;
Will Deacon518f7132014-11-14 17:17:54 +00001669 }
Robin Murphy44f68762018-09-20 17:10:27 +01001670 break;
1671 case IOMMU_DOMAIN_DMA:
1672 switch (attr) {
1673 case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
1674 smmu_domain->non_strict = *(int *)data;
1675 break;
1676 default:
1677 ret = -ENODEV;
1678 }
Will Deacon518f7132014-11-14 17:17:54 +00001679 break;
Will Deaconc752ce42014-06-25 22:46:31 +01001680 default:
Robin Murphy44f68762018-09-20 17:10:27 +01001681 ret = -EINVAL;
Will Deaconc752ce42014-06-25 22:46:31 +01001682 }
Will Deacon518f7132014-11-14 17:17:54 +00001683out_unlock:
1684 mutex_unlock(&smmu_domain->init_mutex);
1685 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01001686}
1687
Robin Murphy021bb842016-09-14 15:26:46 +01001688static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
1689{
Robin Murphy56fbf602017-03-31 12:03:33 +01001690 u32 mask, fwid = 0;
Robin Murphy021bb842016-09-14 15:26:46 +01001691
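	/*
	 * Pack the DT-supplied stream ID and (optional) mask into the single
	 * u32 SMR_ID/SMR_MASK format used throughout the driver.
	 */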
1692 if (args->args_count > 0)
Robin Murphy0caf5f42019-08-15 19:37:23 +01001693 fwid |= FIELD_PREP(SMR_ID, args->args[0]);
Robin Murphy021bb842016-09-14 15:26:46 +01001694
1695 if (args->args_count > 1)
Robin Murphy0caf5f42019-08-15 19:37:23 +01001696 fwid |= FIELD_PREP(SMR_MASK, args->args[1]);
Robin Murphy56fbf602017-03-31 12:03:33 +01001697 else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
Robin Murphy0caf5f42019-08-15 19:37:23 +01001698 fwid |= FIELD_PREP(SMR_MASK, mask);
Robin Murphy021bb842016-09-14 15:26:46 +01001699
1700 return iommu_fwspec_add_ids(dev, &fwid, 1);
1701}
1702
Eric Augerf3ebee82017-01-19 20:57:55 +00001703static void arm_smmu_get_resv_regions(struct device *dev,
1704 struct list_head *head)
1705{
1706 struct iommu_resv_region *region;
1707 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
1708
1709 region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
Robin Murphy9d3a4de2017-03-16 17:00:16 +00001710 prot, IOMMU_RESV_SW_MSI);
Eric Augerf3ebee82017-01-19 20:57:55 +00001711 if (!region)
1712 return;
1713
1714 list_add_tail(&region->list, head);
Robin Murphy273df962017-03-16 17:00:19 +00001715
1716 iommu_dma_get_resv_regions(dev, head);
Eric Augerf3ebee82017-01-19 20:57:55 +00001717}
1718
1719static void arm_smmu_put_resv_regions(struct device *dev,
1720 struct list_head *head)
1721{
1722 struct iommu_resv_region *entry, *next;
1723
1724 list_for_each_entry_safe(entry, next, head, list)
1725 kfree(entry);
1726}
1727
Will Deacon518f7132014-11-14 17:17:54 +00001728static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01001729 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01001730 .domain_alloc = arm_smmu_domain_alloc,
1731 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01001732 .attach_dev = arm_smmu_attach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01001733 .map = arm_smmu_map,
1734 .unmap = arm_smmu_unmap,
Robin Murphy44f68762018-09-20 17:10:27 +01001735 .flush_iotlb_all = arm_smmu_flush_iotlb_all,
Robin Murphy32b12442017-09-28 15:55:01 +01001736 .iotlb_sync = arm_smmu_iotlb_sync,
Will Deaconc752ce42014-06-25 22:46:31 +01001737 .iova_to_phys = arm_smmu_iova_to_phys,
1738 .add_device = arm_smmu_add_device,
1739 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02001740 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01001741 .domain_get_attr = arm_smmu_domain_get_attr,
1742 .domain_set_attr = arm_smmu_domain_set_attr,
Robin Murphy021bb842016-09-14 15:26:46 +01001743 .of_xlate = arm_smmu_of_xlate,
Eric Augerf3ebee82017-01-19 20:57:55 +00001744 .get_resv_regions = arm_smmu_get_resv_regions,
1745 .put_resv_regions = arm_smmu_put_resv_regions,
Will Deacon518f7132014-11-14 17:17:54 +00001746 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001747};
1748
1749static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
1750{
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001751 int i;
Peng Fan3ca37122016-05-03 21:50:30 +08001752 u32 reg, major;
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001753
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001754 /* clear global FSR */
Robin Murphy00320ce2019-08-15 19:37:31 +01001755 reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
1756 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, reg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001757
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001758 /*
1759 * Reset stream mapping groups: Initial values mark all SMRn as
1760 * invalid and all S2CRn as bypass unless overridden.
1761 */
Robin Murphy8e8b2032016-09-12 17:13:50 +01001762 for (i = 0; i < smmu->num_mapping_groups; ++i)
1763 arm_smmu_write_sme(smmu, i);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001764
Nipun Gupta6eb18d42016-11-04 15:25:23 +05301765 if (smmu->model == ARM_MMU500) {
1766 /*
1767 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
1768 * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
1769 * bit is only present in MMU-500r2 onwards.
1770 */
Robin Murphy00320ce2019-08-15 19:37:31 +01001771 reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID7);
Robin Murphy0caf5f42019-08-15 19:37:23 +01001772 major = FIELD_GET(ID7_MAJOR, reg);
Robin Murphy00320ce2019-08-15 19:37:31 +01001773 reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sACR);
Nipun Gupta6eb18d42016-11-04 15:25:23 +05301774 if (major >= 2)
1775 reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
1776 /*
1777 * Allow unmatched Stream IDs to allocate bypass
1778 * TLB entries for reduced latency.
1779 */
Feng Kan74f55d32017-10-11 15:08:39 -07001780 reg |= ARM_MMU500_ACR_SMTNMB_TLBEN | ARM_MMU500_ACR_S2CRB_TLBEN;
Robin Murphy00320ce2019-08-15 19:37:31 +01001781 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sACR, reg);
Peng Fan3ca37122016-05-03 21:50:30 +08001782 }
1783
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001784 /* Make sure all context banks are disabled and clear CB_FSR */
1785 for (i = 0; i < smmu->num_context_banks; ++i) {
Robin Murphy90df3732017-08-08 14:56:14 +01001786 arm_smmu_write_context_bank(smmu, i);
Robin Murphy19713fd2019-08-15 19:37:30 +01001787 arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_FSR, FSR_FAULT);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01001788 /*
1789 * Disable MMU-500's not-particularly-beneficial next-page
1790 * prefetcher for the sake of errata #841119 and #826419.
1791 */
1792 if (smmu->model == ARM_MMU500) {
Robin Murphy19713fd2019-08-15 19:37:30 +01001793 reg = arm_smmu_cb_read(smmu, i, ARM_SMMU_CB_ACTLR);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01001794 reg &= ~ARM_MMU500_ACTLR_CPRE;
Robin Murphy19713fd2019-08-15 19:37:30 +01001795 arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_ACTLR, reg);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01001796 }
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001797 }
Will Deacon1463fe42013-07-31 19:21:27 +01001798
Will Deacon45ae7cf2013-06-24 18:31:25 +01001799 /* Invalidate the TLB, just in case */
Robin Murphy00320ce2019-08-15 19:37:31 +01001800 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLH, QCOM_DUMMY_VAL);
1801 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLNSNH, QCOM_DUMMY_VAL);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001802
Robin Murphy00320ce2019-08-15 19:37:31 +01001803 reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001804
Will Deacon45ae7cf2013-06-24 18:31:25 +01001805 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001806 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001807
1808 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001809 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001810
Robin Murphy25a1c962016-02-10 14:25:33 +00001811 /* Enable client access, handling unmatched streams as appropriate */
1812 reg &= ~sCR0_CLIENTPD;
1813 if (disable_bypass)
1814 reg |= sCR0_USFCFG;
1815 else
1816 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001817
1818 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001819 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001820
1821 /* Don't upgrade barriers */
Robin Murphy0caf5f42019-08-15 19:37:23 +01001822 reg &= ~(sCR0_BSU);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001823
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001824 if (smmu->features & ARM_SMMU_FEAT_VMID16)
1825 reg |= sCR0_VMID16EN;
1826
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001827 if (smmu->features & ARM_SMMU_FEAT_EXIDS)
1828 reg |= sCR0_EXIDENABLE;
1829
Will Deacon45ae7cf2013-06-24 18:31:25 +01001830 /* Push the button */
Robin Murphy11febfc2017-03-30 17:56:31 +01001831 arm_smmu_tlb_sync_global(smmu);
Robin Murphy00320ce2019-08-15 19:37:31 +01001832 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, reg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001833}
1834
1835static int arm_smmu_id_size_to_bits(int size)
1836{
1837 switch (size) {
1838 case 0:
1839 return 32;
1840 case 1:
1841 return 36;
1842 case 2:
1843 return 40;
1844 case 3:
1845 return 42;
1846 case 4:
1847 return 44;
1848 case 5:
1849 default:
1850 return 48;
1851 }
1852}
1853
1854static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1855{
Robin Murphy490325e2019-08-15 19:37:26 +01001856 unsigned int size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001857 u32 id;
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001858 bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001859 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001860
1861 dev_notice(smmu->dev, "probing hardware configuration...\n");
Robin Murphyb7862e32016-04-13 18:13:03 +01001862 dev_notice(smmu->dev, "SMMUv%d with:\n",
1863 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001864
1865 /* ID0 */
Robin Murphy00320ce2019-08-15 19:37:31 +01001866 id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01001867
1868 /* Restrict available stages based on module parameter */
1869 if (force_stage == 1)
1870 id &= ~(ID0_S2TS | ID0_NTS);
1871 else if (force_stage == 2)
1872 id &= ~(ID0_S1TS | ID0_NTS);
1873
Will Deacon45ae7cf2013-06-24 18:31:25 +01001874 if (id & ID0_S1TS) {
1875 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
1876 dev_notice(smmu->dev, "\tstage 1 translation\n");
1877 }
1878
1879 if (id & ID0_S2TS) {
1880 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
1881 dev_notice(smmu->dev, "\tstage 2 translation\n");
1882 }
1883
1884 if (id & ID0_NTS) {
1885 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
1886 dev_notice(smmu->dev, "\tnested translation\n");
1887 }
1888
1889 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01001890 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001891 dev_err(smmu->dev, "\tno translation support!\n");
1892 return -ENODEV;
1893 }
1894
Robin Murphyb7862e32016-04-13 18:13:03 +01001895 if ((id & ID0_S1TS) &&
1896 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001897 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
1898 dev_notice(smmu->dev, "\taddress translation ops\n");
1899 }
1900
Robin Murphybae2c2d2015-07-29 19:46:05 +01001901 /*
1902 * In order for DMA API calls to work properly, we must defer to what
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001903 * the FW says about coherency, regardless of what the hardware claims.
Robin Murphybae2c2d2015-07-29 19:46:05 +01001904 * Fortunately, this also opens up a workaround for systems where the
1905 * ID register value has ended up configured incorrectly.
1906 */
Robin Murphybae2c2d2015-07-29 19:46:05 +01001907 cttw_reg = !!(id & ID0_CTTW);
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001908 if (cttw_fw || cttw_reg)
Robin Murphybae2c2d2015-07-29 19:46:05 +01001909 dev_notice(smmu->dev, "\t%scoherent table walk\n",
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001910 cttw_fw ? "" : "non-");
1911 if (cttw_fw != cttw_reg)
Robin Murphybae2c2d2015-07-29 19:46:05 +01001912 dev_notice(smmu->dev,
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001913 "\t(IDR0.CTTW overridden by FW configuration)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01001914
Robin Murphy21174242016-09-12 17:13:48 +01001915 /* Max. number of entries we have for stream matching/indexing */
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001916 if (smmu->version == ARM_SMMU_V2 && id & ID0_EXIDS) {
1917 smmu->features |= ARM_SMMU_FEAT_EXIDS;
1918 size = 1 << 16;
1919 } else {
Robin Murphy0caf5f42019-08-15 19:37:23 +01001920 size = 1 << FIELD_GET(ID0_NUMSIDB, id);
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001921 }
Robin Murphy21174242016-09-12 17:13:48 +01001922 smmu->streamid_mask = size - 1;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001923 if (id & ID0_SMS) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001924 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
Robin Murphy0caf5f42019-08-15 19:37:23 +01001925 size = FIELD_GET(ID0_NUMSMRG, id);
Robin Murphy21174242016-09-12 17:13:48 +01001926 if (size == 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001927 dev_err(smmu->dev,
1928 "stream-matching supported, but no SMRs present!\n");
1929 return -ENODEV;
1930 }
1931
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001932 /* Zero-initialised to mark as invalid */
1933 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
1934 GFP_KERNEL);
1935 if (!smmu->smrs)
1936 return -ENOMEM;
1937
Will Deacon45ae7cf2013-06-24 18:31:25 +01001938 dev_notice(smmu->dev,
Robin Murphy490325e2019-08-15 19:37:26 +01001939 "\tstream matching with %u register groups\n", size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001940 }
Robin Murphy8e8b2032016-09-12 17:13:50 +01001941 /* s2cr->type == 0 means translation, so initialise explicitly */
1942 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
1943 GFP_KERNEL);
1944 if (!smmu->s2crs)
1945 return -ENOMEM;
1946 for (i = 0; i < size; i++)
1947 smmu->s2crs[i] = s2cr_init_val;
1948
Robin Murphy21174242016-09-12 17:13:48 +01001949 smmu->num_mapping_groups = size;
Robin Murphy588888a2016-09-12 17:13:54 +01001950 mutex_init(&smmu->stream_map_mutex);
Will Deacon8e517e72017-07-06 15:55:48 +01001951 spin_lock_init(&smmu->global_sync_lock);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001952
Robin Murphy7602b872016-04-28 17:12:09 +01001953 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
1954 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
1955 if (!(id & ID0_PTFS_NO_AARCH32S))
1956 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
1957 }
1958
Will Deacon45ae7cf2013-06-24 18:31:25 +01001959 /* ID1 */
Robin Murphy00320ce2019-08-15 19:37:31 +01001960 id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01001961 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001962
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01001963 /* Check for size mismatch of SMMU address space from mapped region */
Robin Murphy0caf5f42019-08-15 19:37:23 +01001964 size = 1 << (FIELD_GET(ID1_NUMPAGENDXB, id) + 1);
Robin Murphy490325e2019-08-15 19:37:26 +01001965 if (smmu->numpage != 2 * size << smmu->pgshift)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001966 dev_warn(smmu->dev,
Robin Murphy490325e2019-08-15 19:37:26 +01001967 "SMMU address space size (0x%x) differs from mapped region size (0x%x)!\n",
1968 2 * size << smmu->pgshift, smmu->numpage);
1969 /* Now properly encode NUMPAGE to subsequently derive SMMU_CB_BASE */
1970 smmu->numpage = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001971
Robin Murphy0caf5f42019-08-15 19:37:23 +01001972 smmu->num_s2_context_banks = FIELD_GET(ID1_NUMS2CB, id);
1973 smmu->num_context_banks = FIELD_GET(ID1_NUMCB, id);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001974 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
1975 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
1976 return -ENODEV;
1977 }
1978 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
1979 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphye086d912016-04-13 18:12:58 +01001980 /*
1981 * Cavium CN88xx erratum #27704.
1982 * Ensure ASID and VMID allocation is unique across all SMMUs in
1983 * the system.
1984 */
1985 if (smmu->model == CAVIUM_SMMUV2) {
1986 smmu->cavium_id_base =
1987 atomic_add_return(smmu->num_context_banks,
1988 &cavium_smmu_context_count);
1989 smmu->cavium_id_base -= smmu->num_context_banks;
Robert Richter53c35dce2017-03-13 11:39:01 +01001990 dev_notice(smmu->dev, "\tenabling workaround for Cavium erratum 27704\n");
Robin Murphye086d912016-04-13 18:12:58 +01001991 }
Robin Murphy90df3732017-08-08 14:56:14 +01001992 smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks,
1993 sizeof(*smmu->cbs), GFP_KERNEL);
1994 if (!smmu->cbs)
1995 return -ENOMEM;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001996
1997 /* ID2 */
Robin Murphy00320ce2019-08-15 19:37:31 +01001998 id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID2);
Robin Murphy0caf5f42019-08-15 19:37:23 +01001999 size = arm_smmu_id_size_to_bits(FIELD_GET(ID2_IAS, id));
Will Deacon518f7132014-11-14 17:17:54 +00002000 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002001
Will Deacon518f7132014-11-14 17:17:54 +00002002 /* The output mask is also applied for bypass */
Robin Murphy0caf5f42019-08-15 19:37:23 +01002003 size = arm_smmu_id_size_to_bits(FIELD_GET(ID2_OAS, id));
Will Deacon518f7132014-11-14 17:17:54 +00002004 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002005
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08002006 if (id & ID2_VMID16)
2007 smmu->features |= ARM_SMMU_FEAT_VMID16;
2008
Robin Murphyf1d84542015-03-04 16:41:05 +00002009 /*
2010 * What the page table walker can address actually depends on which
2011 * descriptor format is in use, but since a) we don't know that yet,
2012 * and b) it can vary per context bank, this will have to do...
2013 */
2014 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
2015 dev_warn(smmu->dev,
2016 "failed to set DMA mask for table walker\n");
2017
Robin Murphyb7862e32016-04-13 18:13:03 +01002018 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00002019 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01002020 if (smmu->version == ARM_SMMU_V1_64K)
2021 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002022 } else {
Robin Murphy0caf5f42019-08-15 19:37:23 +01002023 size = FIELD_GET(ID2_UBS, id);
Will Deacon518f7132014-11-14 17:17:54 +00002024 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00002025 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01002026 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00002027 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01002028 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00002029 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01002030 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002031 }
2032
Robin Murphy7602b872016-04-28 17:12:09 +01002033 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01002034 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01002035 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01002036 if (smmu->features &
2037 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01002038 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01002039 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01002040 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01002041 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01002042 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01002043
Robin Murphyd5466352016-05-09 17:20:09 +01002044 if (arm_smmu_ops.pgsize_bitmap == -1UL)
2045 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
2046 else
2047 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
2048 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
2049 smmu->pgsize_bitmap);
2050
Will Deacon518f7132014-11-14 17:17:54 +00002051
Will Deacon28d60072014-09-01 16:24:48 +01002052 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
2053 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
Will Deacon518f7132014-11-14 17:17:54 +00002054 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01002055
2056 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
2057 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
Will Deacon518f7132014-11-14 17:17:54 +00002058 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01002059
Will Deacon45ae7cf2013-06-24 18:31:25 +01002060 return 0;
2061}
2062
Robin Murphy67b65a32016-04-13 18:12:57 +01002063struct arm_smmu_match_data {
2064 enum arm_smmu_arch_version version;
2065 enum arm_smmu_implementation model;
2066};
2067
2068#define ARM_SMMU_MATCH_DATA(name, ver, imp) \
Sricharan R96a299d2018-12-04 11:52:09 +05302069static const struct arm_smmu_match_data name = { .version = ver, .model = imp }
Robin Murphy67b65a32016-04-13 18:12:57 +01002070
2071ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
2072ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
Robin Murphyb7862e32016-04-13 18:13:03 +01002073ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01002074ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
Robin Murphye086d912016-04-13 18:12:58 +01002075ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
Vivek Gautam89cddc52018-12-04 11:52:13 +05302076ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2);
Robin Murphy67b65a32016-04-13 18:12:57 +01002077
Joerg Roedel09b52692014-10-02 12:24:45 +02002078static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01002079 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
2080 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
2081 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01002082 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01002083 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01002084 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Vivek Gautam89cddc52018-12-04 11:52:13 +05302085 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
Robin Murphy09360402014-08-28 17:51:59 +01002086 { },
2087};
Robin Murphy09360402014-08-28 17:51:59 +01002088
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002089#ifdef CONFIG_ACPI
2090static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
2091{
2092 int ret = 0;
2093
2094 switch (model) {
2095 case ACPI_IORT_SMMU_V1:
2096 case ACPI_IORT_SMMU_CORELINK_MMU400:
2097 smmu->version = ARM_SMMU_V1;
2098 smmu->model = GENERIC_SMMU;
2099 break;
Robin Murphy84c24372017-06-19 16:41:56 +01002100 case ACPI_IORT_SMMU_CORELINK_MMU401:
2101 smmu->version = ARM_SMMU_V1_64K;
2102 smmu->model = GENERIC_SMMU;
2103 break;
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002104 case ACPI_IORT_SMMU_V2:
2105 smmu->version = ARM_SMMU_V2;
2106 smmu->model = GENERIC_SMMU;
2107 break;
2108 case ACPI_IORT_SMMU_CORELINK_MMU500:
2109 smmu->version = ARM_SMMU_V2;
2110 smmu->model = ARM_MMU500;
2111 break;
Robin Murphy84c24372017-06-19 16:41:56 +01002112 case ACPI_IORT_SMMU_CAVIUM_THUNDERX:
2113 smmu->version = ARM_SMMU_V2;
2114 smmu->model = CAVIUM_SMMUV2;
2115 break;
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002116 default:
2117 ret = -ENODEV;
2118 }
2119
2120 return ret;
2121}
2122
2123static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
2124 struct arm_smmu_device *smmu)
2125{
2126 struct device *dev = smmu->dev;
2127 struct acpi_iort_node *node =
2128 *(struct acpi_iort_node **)dev_get_platdata(dev);
2129 struct acpi_iort_smmu *iort_smmu;
2130 int ret;
2131
2132 /* Retrieve SMMU1/2 specific data */
2133 iort_smmu = (struct acpi_iort_smmu *)node->node_data;
2134
2135 ret = acpi_smmu_get_data(iort_smmu->model, smmu);
2136 if (ret < 0)
2137 return ret;
2138
2139 /* Ignore the configuration access interrupt */
2140 smmu->num_global_irqs = 1;
2141
2142 if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
2143 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
2144
2145 return 0;
2146}
2147#else
2148static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
2149 struct arm_smmu_device *smmu)
2150{
2151 return -ENODEV;
2152}
2153#endif
2154
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002155static int arm_smmu_device_dt_probe(struct platform_device *pdev,
2156 struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002157{
Robin Murphy67b65a32016-04-13 18:12:57 +01002158 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002159 struct device *dev = &pdev->dev;
Robin Murphy021bb842016-09-14 15:26:46 +01002160 bool legacy_binding;
2161
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002162 if (of_property_read_u32(dev->of_node, "#global-interrupts",
2163 &smmu->num_global_irqs)) {
2164 dev_err(dev, "missing #global-interrupts property\n");
2165 return -ENODEV;
2166 }
2167
2168 data = of_device_get_match_data(dev);
2169 smmu->version = data->version;
2170 smmu->model = data->model;
2171
2172 parse_driver_options(smmu);
2173
Robin Murphy021bb842016-09-14 15:26:46 +01002174 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
2175 if (legacy_binding && !using_generic_binding) {
2176 if (!using_legacy_binding)
2177 pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
2178 using_legacy_binding = true;
2179 } else if (!legacy_binding && !using_legacy_binding) {
2180 using_generic_binding = true;
2181 } else {
2182 dev_err(dev, "not probing due to mismatched DT properties\n");
2183 return -ENODEV;
2184 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002185
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002186 if (of_dma_is_coherent(dev->of_node))
2187 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
2188
2189 return 0;
2190}
2191
Robin Murphyf6810c12017-04-10 16:51:05 +05302192static void arm_smmu_bus_init(void)
2193{
2194 /* Oh, for a proper bus abstraction */
2195 if (!iommu_present(&platform_bus_type))
2196 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
2197#ifdef CONFIG_ARM_AMBA
2198 if (!iommu_present(&amba_bustype))
2199 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
2200#endif
2201#ifdef CONFIG_PCI
2202 if (!iommu_present(&pci_bus_type)) {
2203 pci_request_acs();
2204 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
2205 }
2206#endif
Nipun Guptaeab03e22018-09-10 19:19:18 +05302207#ifdef CONFIG_FSL_MC_BUS
2208 if (!iommu_present(&fsl_mc_bus_type))
2209 bus_set_iommu(&fsl_mc_bus_type, &arm_smmu_ops);
2210#endif
Robin Murphyf6810c12017-04-10 16:51:05 +05302211}
2212
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002213static int arm_smmu_device_probe(struct platform_device *pdev)
2214{
2215 struct resource *res;
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002216 resource_size_t ioaddr;
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002217 struct arm_smmu_device *smmu;
2218 struct device *dev = &pdev->dev;
2219 int num_irqs, i, err;
2220
Will Deacon45ae7cf2013-06-24 18:31:25 +01002221 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
2222 if (!smmu) {
2223 dev_err(dev, "failed to allocate arm_smmu_device\n");
2224 return -ENOMEM;
2225 }
2226 smmu->dev = dev;
2227
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002228 if (dev->of_node)
2229 err = arm_smmu_device_dt_probe(pdev, smmu);
2230 else
2231 err = arm_smmu_device_acpi_probe(pdev, smmu);
2232
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002233 if (err)
2234 return err;
Robin Murphy09360402014-08-28 17:51:59 +01002235
Will Deacon45ae7cf2013-06-24 18:31:25 +01002236 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002237 ioaddr = res->start;
Julia Lawall8a7f4312013-08-19 12:20:37 +01002238 smmu->base = devm_ioremap_resource(dev, res);
2239 if (IS_ERR(smmu->base))
2240 return PTR_ERR(smmu->base);
Robin Murphy490325e2019-08-15 19:37:26 +01002241 /*
2242 * The resource size should effectively match the value of SMMU_TOP;
2243 * stash that temporarily until we know PAGESIZE to validate it with.
2244 */
2245 smmu->numpage = resource_size(res);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002246
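	/*
	 * Count the IRQ resources: anything beyond the global fault
	 * interrupts is treated as a context bank interrupt.
	 */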
Will Deacon45ae7cf2013-06-24 18:31:25 +01002247 num_irqs = 0;
2248 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
2249 num_irqs++;
2250 if (num_irqs > smmu->num_global_irqs)
2251 smmu->num_context_irqs++;
2252 }
2253
Andreas Herrmann44a08de2013-10-01 13:39:07 +01002254 if (!smmu->num_context_irqs) {
2255 dev_err(dev, "found %d interrupts but expected at least %d\n",
2256 num_irqs, smmu->num_global_irqs + 1);
2257 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002258 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002259
Kees Cooka86854d2018-06-12 14:07:58 -07002260 smmu->irqs = devm_kcalloc(dev, num_irqs, sizeof(*smmu->irqs),
Will Deacon45ae7cf2013-06-24 18:31:25 +01002261 GFP_KERNEL);
2262 if (!smmu->irqs) {
2263 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
2264 return -ENOMEM;
2265 }
2266
2267 for (i = 0; i < num_irqs; ++i) {
2268 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07002269
Will Deacon45ae7cf2013-06-24 18:31:25 +01002270 if (irq < 0) {
2271 dev_err(dev, "failed to get irq index %d\n", i);
2272 return -ENODEV;
2273 }
2274 smmu->irqs[i] = irq;
2275 }
2276
Sricharan R96a299d2018-12-04 11:52:09 +05302277 err = devm_clk_bulk_get_all(dev, &smmu->clks);
2278 if (err < 0) {
2279 dev_err(dev, "failed to get clocks %d\n", err);
2280 return err;
2281 }
2282 smmu->num_clks = err;
2283
2284 err = clk_bulk_prepare_enable(smmu->num_clks, smmu->clks);
2285 if (err)
2286 return err;
2287
Olav Haugan3c8766d2014-08-22 17:12:32 -07002288 err = arm_smmu_device_cfg_probe(smmu);
2289 if (err)
2290 return err;
2291
Vivek Gautamd1e20222018-07-19 23:23:56 +05302292 if (smmu->version == ARM_SMMU_V2) {
2293 if (smmu->num_context_banks > smmu->num_context_irqs) {
2294 dev_err(dev,
2295 "found only %d context irq(s) but %d required\n",
2296 smmu->num_context_irqs, smmu->num_context_banks);
2297 return -ENODEV;
2298 }
2299
2300 /* Ignore superfluous interrupts */
2301 smmu->num_context_irqs = smmu->num_context_banks;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002302 }
2303
	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = devm_request_irq(smmu->dev, smmu->irqs[i],
				       arm_smmu_global_fault,
				       IRQF_SHARED,
				       "arm-smmu global fault",
				       smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			return err;
		}
	}

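	/*
	 * Register this instance with the IOMMU core: it appears in sysfs
	 * as "smmu.<base address>", and its firmware node lets master
	 * devices be matched back to it.
	 */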
	err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
				     "smmu.%pa", &ioaddr);
	if (err) {
		dev_err(dev, "Failed to register iommu in sysfs\n");
		return err;
	}

	iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
	iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);

	err = iommu_device_register(&smmu->iommu);
	if (err) {
		dev_err(dev, "Failed to register iommu\n");
		return err;
	}

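	/*
	 * Reset the hardware to a known state and probe which SMR ID/mask
	 * bits are actually implemented, before any masters are attached.
	 */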
	platform_set_drvdata(pdev, smmu);
	arm_smmu_device_reset(smmu);
	arm_smmu_test_smr_masks(smmu);

	/*
	 * We want to avoid touching dev->power.lock in fastpaths unless
	 * it's really going to do something useful - pm_runtime_enabled()
	 * can serve as an ideal proxy for that decision. So, conditionally
	 * enable pm_runtime.
	 */
	if (dev->pm_domain) {
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
	}

	/*
	 * For ACPI and generic DT bindings, an SMMU will be probed before
	 * any device which might need it, so we want the bus ops in place
	 * ready to handle default domain setup as soon as any SMMU exists.
	 */
	if (!using_legacy_binding)
		arm_smmu_bus_init();

	return 0;
}

/*
 * With the legacy DT binding in play, though, we have no guarantees about
 * probe order, but then we're also not doing default domains, so we can
 * delay setting bus ops until we're sure every possible SMMU is ready,
 * and that way ensure that no add_device() calls get missed.
 */
static int arm_smmu_legacy_bus_init(void)
{
	if (using_legacy_binding)
		arm_smmu_bus_init();
	return 0;
}
device_initcall_sync(arm_smmu_legacy_bus_init);

static void arm_smmu_device_shutdown(struct platform_device *pdev)
{
	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);

	if (!smmu)
		return;

	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_err(&pdev->dev, "removing device with active domains!\n");

	arm_smmu_rpm_get(smmu);
	/* Turn the thing off */
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, sCR0_CLIENTPD);
	arm_smmu_rpm_put(smmu);

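	/*
	 * If runtime PM is enabled, let the PM core gate the clocks via
	 * runtime suspend; otherwise disable them directly. Either way,
	 * they were prepared at probe time, so unprepare them here.
	 */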
	if (pm_runtime_enabled(smmu->dev))
		pm_runtime_force_suspend(smmu->dev);
	else
		clk_bulk_disable(smmu->num_clks, smmu->clks);

	clk_bulk_unprepare(smmu->num_clks, smmu->clks);
}

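/*
 * Runtime PM callbacks: resume re-enables the clocks and replays the reset
 * sequence, since register state may have been lost while the SMMU was
 * powered down; suspend simply gates the clocks again.
 */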
static int __maybe_unused arm_smmu_runtime_resume(struct device *dev)
{
	struct arm_smmu_device *smmu = dev_get_drvdata(dev);
	int ret;

	ret = clk_bulk_enable(smmu->num_clks, smmu->clks);
	if (ret)
		return ret;

	arm_smmu_device_reset(smmu);

	return 0;
}

static int __maybe_unused arm_smmu_runtime_suspend(struct device *dev)
{
	struct arm_smmu_device *smmu = dev_get_drvdata(dev);

	clk_bulk_disable(smmu->num_clks, smmu->clks);

	return 0;
}

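/*
 * System sleep reuses the runtime PM callbacks; if the device is already
 * runtime-suspended there is nothing further to do.
 */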
static int __maybe_unused arm_smmu_pm_resume(struct device *dev)
{
	if (pm_runtime_suspended(dev))
		return 0;

	return arm_smmu_runtime_resume(dev);
}

static int __maybe_unused arm_smmu_pm_suspend(struct device *dev)
{
	if (pm_runtime_suspended(dev))
		return 0;

	return arm_smmu_runtime_suspend(dev);
}

static const struct dev_pm_ops arm_smmu_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(arm_smmu_pm_suspend, arm_smmu_pm_resume)
	SET_RUNTIME_PM_OPS(arm_smmu_runtime_suspend,
			   arm_smmu_runtime_resume, NULL)
};

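/*
 * The driver is built in and has no remove() callback, so suppress the
 * sysfs bind/unbind attributes to avoid it being unbound while
 * translations may still be live.
 */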
static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name			= "arm-smmu",
		.of_match_table		= of_match_ptr(arm_smmu_of_match),
		.pm			= &arm_smmu_pm_ops,
		.suppress_bind_attrs	= true,
	},
	.probe	= arm_smmu_device_probe,
	.shutdown = arm_smmu_device_shutdown,
};
builtin_platform_driver(arm_smmu_driver);