blob: e72554f334eeb20ebca7e9e2f95cb1b138fe2d2d [file] [log] [blame]
Thomas Gleixner45051532019-05-29 16:57:47 -07001// SPDX-License-Identifier: GPL-2.0-only
Will Deacon45ae7cf2013-06-24 18:31:25 +01002/*
3 * IOMMU API for ARM architected SMMU implementations.
4 *
Will Deacon45ae7cf2013-06-24 18:31:25 +01005 * Copyright (C) 2013 ARM Limited
6 *
7 * Author: Will Deacon <will.deacon@arm.com>
8 *
9 * This driver currently supports:
10 * - SMMUv1 and v2 implementations
11 * - Stream-matching and stream-indexing
12 * - v7/v8 long-descriptor format
13 * - Non-secure access to the SMMU
Will Deacon45ae7cf2013-06-24 18:31:25 +010014 * - Context fault reporting
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +030015 * - Extended Stream ID (16 bit)
Will Deacon45ae7cf2013-06-24 18:31:25 +010016 */
17
18#define pr_fmt(fmt) "arm-smmu: " fmt
19
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +000020#include <linux/acpi.h>
21#include <linux/acpi_iort.h>
Robin Murphy1f3d5ca2016-09-12 17:13:49 +010022#include <linux/atomic.h>
Robin Murphy0caf5f42019-08-15 19:37:23 +010023#include <linux/bitfield.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010024#include <linux/delay.h>
Robin Murphy9adb9592016-01-26 18:06:36 +000025#include <linux/dma-iommu.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010026#include <linux/dma-mapping.h>
27#include <linux/err.h>
28#include <linux/interrupt.h>
29#include <linux/io.h>
Robin Murphyf9a05f02016-04-13 18:13:01 +010030#include <linux/io-64-nonatomic-hi-lo.h>
Rob Herringb77cf112019-02-05 10:37:31 -060031#include <linux/io-pgtable.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010032#include <linux/iommu.h>
Mitchel Humpherys859a7322014-10-29 21:13:40 +000033#include <linux/iopoll.h>
Paul Gortmakeraddb672f2018-12-01 14:19:16 -050034#include <linux/init.h>
35#include <linux/moduleparam.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010036#include <linux/of.h>
Robin Murphybae2c2d2015-07-29 19:46:05 +010037#include <linux/of_address.h>
Robin Murphyd6fc5d92016-09-12 17:13:52 +010038#include <linux/of_device.h>
Robin Murphyadfec2e2016-09-12 17:13:55 +010039#include <linux/of_iommu.h>
Will Deacona9a1b0b2014-05-01 18:05:08 +010040#include <linux/pci.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010041#include <linux/platform_device.h>
Sricharan R96a299d2018-12-04 11:52:09 +053042#include <linux/pm_runtime.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010043#include <linux/slab.h>
44#include <linux/spinlock.h>
45
46#include <linux/amba/bus.h>
Nipun Guptaeab03e22018-09-10 19:19:18 +053047#include <linux/fsl/mc.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010048
Rob Clark2b037742017-08-09 10:43:03 -040049#include "arm-smmu-regs.h"
50
Robin Murphy4e4abae2019-06-03 14:15:37 +020051/*
52 * Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU
53 * global register space are still, in fact, using a hypervisor to mediate it
54 * by trapping and emulating register accesses. Sadly, some deployed versions
55 * of said trapping code have bugs wherein they go horribly wrong for stores
56 * using r31 (i.e. XZR/WZR) as the source register.
57 */
58#define QCOM_DUMMY_VAL -1
59
Rob Clark2b037742017-08-09 10:43:03 -040060#define ARM_MMU500_ACTLR_CPRE (1 << 1)
61
62#define ARM_MMU500_ACR_CACHE_LOCK (1 << 26)
Feng Kan74f55d32017-10-11 15:08:39 -070063#define ARM_MMU500_ACR_S2CRB_TLBEN (1 << 10)
Rob Clark2b037742017-08-09 10:43:03 -040064#define ARM_MMU500_ACR_SMTNMB_TLBEN (1 << 8)
65
66#define TLB_LOOP_TIMEOUT 1000000 /* 1s! */
67#define TLB_SPIN_COUNT 10
Will Deacon45ae7cf2013-06-24 18:31:25 +010068
Will Deacon45ae7cf2013-06-24 18:31:25 +010069/* Maximum number of context banks per SMMU */
70#define ARM_SMMU_MAX_CBS 128
71
Will Deacon45ae7cf2013-06-24 18:31:25 +010072/* SMMU global address space */
73#define ARM_SMMU_GR0(smmu) ((smmu)->base)
Will Deacon45ae7cf2013-06-24 18:31:25 +010074
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +000075/*
76 * SMMU global address space with conditional offset to access secure
77 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
78 * nsGFSYNR0: 0x450)
79 */
80#define ARM_SMMU_GR0_NS(smmu) \
81 ((smmu)->base + \
82 ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) \
83 ? 0x400 : 0))
84
Eric Augerf3ebee82017-01-19 20:57:55 +000085#define MSI_IOVA_BASE 0x8000000
86#define MSI_IOVA_LENGTH 0x100000
87
Will Deacon4cf740b2014-07-14 19:47:39 +010088static int force_stage;
Paul Gortmakeraddb672f2018-12-01 14:19:16 -050089/*
90 * not really modular, but the easiest way to keep compat with existing
91 * bootargs behaviour is to continue using module_param() here.
92 */
Robin Murphy25a1c962016-02-10 14:25:33 +000093module_param(force_stage, int, S_IRUGO);
Will Deacon4cf740b2014-07-14 19:47:39 +010094MODULE_PARM_DESC(force_stage,
95 "Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
Douglas Anderson954a03b2019-03-01 11:20:17 -080096static bool disable_bypass =
97 IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT);
Robin Murphy25a1c962016-02-10 14:25:33 +000098module_param(disable_bypass, bool, S_IRUGO);
99MODULE_PARM_DESC(disable_bypass,
100 "Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
Will Deacon4cf740b2014-07-14 19:47:39 +0100101
Robin Murphy09360402014-08-28 17:51:59 +0100102enum arm_smmu_arch_version {
Robin Murphyb7862e32016-04-13 18:13:03 +0100103 ARM_SMMU_V1,
104 ARM_SMMU_V1_64K,
Robin Murphy09360402014-08-28 17:51:59 +0100105 ARM_SMMU_V2,
106};
107
Robin Murphy67b65a32016-04-13 18:12:57 +0100108enum arm_smmu_implementation {
109 GENERIC_SMMU,
Robin Murphyf0cfffc2016-04-13 18:12:59 +0100110 ARM_MMU500,
Robin Murphye086d912016-04-13 18:12:58 +0100111 CAVIUM_SMMUV2,
Vivek Gautam89cddc52018-12-04 11:52:13 +0530112 QCOM_SMMUV2,
Robin Murphy67b65a32016-04-13 18:12:57 +0100113};
114
Robin Murphy8e8b2032016-09-12 17:13:50 +0100115struct arm_smmu_s2cr {
Robin Murphy588888a2016-09-12 17:13:54 +0100116 struct iommu_group *group;
117 int count;
Robin Murphy8e8b2032016-09-12 17:13:50 +0100118 enum arm_smmu_s2cr_type type;
119 enum arm_smmu_s2cr_privcfg privcfg;
120 u8 cbndx;
121};
122
123#define s2cr_init_val (struct arm_smmu_s2cr){ \
124 .type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS, \
125}
126
Will Deacon45ae7cf2013-06-24 18:31:25 +0100127struct arm_smmu_smr {
Will Deacon45ae7cf2013-06-24 18:31:25 +0100128 u16 mask;
129 u16 id;
Robin Murphy1f3d5ca2016-09-12 17:13:49 +0100130 bool valid;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100131};
132
Robin Murphy90df3732017-08-08 14:56:14 +0100133struct arm_smmu_cb {
134 u64 ttbr[2];
135 u32 tcr[2];
136 u32 mair[2];
137 struct arm_smmu_cfg *cfg;
138};
139
Will Deacona9a1b0b2014-05-01 18:05:08 +0100140struct arm_smmu_master_cfg {
Robin Murphyf80cd882016-09-14 15:21:39 +0100141 struct arm_smmu_device *smmu;
Robin Murphyadfec2e2016-09-12 17:13:55 +0100142 s16 smendx[];
Will Deacon45ae7cf2013-06-24 18:31:25 +0100143};
Robin Murphy1f3d5ca2016-09-12 17:13:49 +0100144#define INVALID_SMENDX -1
Robin Murphyadfec2e2016-09-12 17:13:55 +0100145#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
146#define fwspec_smmu(fw) (__fwspec_cfg(fw)->smmu)
Robin Murphy8c82d6e2016-11-07 18:25:09 +0000147#define fwspec_smendx(fw, i) \
148 (i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
Robin Murphyadfec2e2016-09-12 17:13:55 +0100149#define for_each_cfg_sme(fw, i, idx) \
Robin Murphy8c82d6e2016-11-07 18:25:09 +0000150 for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100151
152struct arm_smmu_device {
153 struct device *dev;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100154
155 void __iomem *base;
Robin Murphy490325e2019-08-15 19:37:26 +0100156 unsigned int numpage;
157 unsigned int pgshift;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100158
159#define ARM_SMMU_FEAT_COHERENT_WALK (1 << 0)
160#define ARM_SMMU_FEAT_STREAM_MATCH (1 << 1)
161#define ARM_SMMU_FEAT_TRANS_S1 (1 << 2)
162#define ARM_SMMU_FEAT_TRANS_S2 (1 << 3)
163#define ARM_SMMU_FEAT_TRANS_NESTED (1 << 4)
Mitchel Humpherys859a7322014-10-29 21:13:40 +0000164#define ARM_SMMU_FEAT_TRANS_OPS (1 << 5)
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -0800165#define ARM_SMMU_FEAT_VMID16 (1 << 6)
Robin Murphy7602b872016-04-28 17:12:09 +0100166#define ARM_SMMU_FEAT_FMT_AARCH64_4K (1 << 7)
167#define ARM_SMMU_FEAT_FMT_AARCH64_16K (1 << 8)
168#define ARM_SMMU_FEAT_FMT_AARCH64_64K (1 << 9)
169#define ARM_SMMU_FEAT_FMT_AARCH32_L (1 << 10)
170#define ARM_SMMU_FEAT_FMT_AARCH32_S (1 << 11)
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +0300171#define ARM_SMMU_FEAT_EXIDS (1 << 12)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100172 u32 features;
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000173
174#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
175 u32 options;
Robin Murphy09360402014-08-28 17:51:59 +0100176 enum arm_smmu_arch_version version;
Robin Murphy67b65a32016-04-13 18:12:57 +0100177 enum arm_smmu_implementation model;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100178
179 u32 num_context_banks;
180 u32 num_s2_context_banks;
181 DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
Robin Murphy90df3732017-08-08 14:56:14 +0100182 struct arm_smmu_cb *cbs;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100183 atomic_t irptndx;
184
185 u32 num_mapping_groups;
Robin Murphy21174242016-09-12 17:13:48 +0100186 u16 streamid_mask;
187 u16 smr_mask_mask;
Robin Murphy1f3d5ca2016-09-12 17:13:49 +0100188 struct arm_smmu_smr *smrs;
Robin Murphy8e8b2032016-09-12 17:13:50 +0100189 struct arm_smmu_s2cr *s2crs;
Robin Murphy588888a2016-09-12 17:13:54 +0100190 struct mutex stream_map_mutex;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100191
Will Deacon518f7132014-11-14 17:17:54 +0000192 unsigned long va_size;
193 unsigned long ipa_size;
194 unsigned long pa_size;
Robin Murphyd5466352016-05-09 17:20:09 +0100195 unsigned long pgsize_bitmap;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100196
197 u32 num_global_irqs;
198 u32 num_context_irqs;
199 unsigned int *irqs;
Sricharan R96a299d2018-12-04 11:52:09 +0530200 struct clk_bulk_data *clks;
201 int num_clks;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100202
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -0800203 u32 cavium_id_base; /* Specific to Cavium */
Joerg Roedel9648cbc2017-02-01 18:11:36 +0100204
Will Deacon8e517e72017-07-06 15:55:48 +0100205 spinlock_t global_sync_lock;
206
Joerg Roedel9648cbc2017-02-01 18:11:36 +0100207 /* IOMMU core code handle */
208 struct iommu_device iommu;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100209};
210
Robin Murphy7602b872016-04-28 17:12:09 +0100211enum arm_smmu_context_fmt {
212 ARM_SMMU_CTX_FMT_NONE,
213 ARM_SMMU_CTX_FMT_AARCH64,
214 ARM_SMMU_CTX_FMT_AARCH32_L,
215 ARM_SMMU_CTX_FMT_AARCH32_S,
Will Deacon45ae7cf2013-06-24 18:31:25 +0100216};
217
218struct arm_smmu_cfg {
Will Deacon45ae7cf2013-06-24 18:31:25 +0100219 u8 cbndx;
220 u8 irptndx;
Robin Murphy280b6832017-03-30 17:56:29 +0100221 union {
222 u16 asid;
223 u16 vmid;
224 };
Robin Murphy5114e962019-08-15 19:37:24 +0100225 enum arm_smmu_cbar_type cbar;
Robin Murphy7602b872016-04-28 17:12:09 +0100226 enum arm_smmu_context_fmt fmt;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100227};
Dan Carpenterfaea13b72013-08-21 09:33:30 +0100228#define INVALID_IRPTNDX 0xff
Will Deacon45ae7cf2013-06-24 18:31:25 +0100229
Will Deaconc752ce42014-06-25 22:46:31 +0100230enum arm_smmu_domain_stage {
231 ARM_SMMU_DOMAIN_S1 = 0,
232 ARM_SMMU_DOMAIN_S2,
233 ARM_SMMU_DOMAIN_NESTED,
Will Deacon61bc6712017-01-06 16:56:03 +0000234 ARM_SMMU_DOMAIN_BYPASS,
Will Deaconc752ce42014-06-25 22:46:31 +0100235};
236
Will Deacon45ae7cf2013-06-24 18:31:25 +0100237struct arm_smmu_domain {
Will Deacon44680ee2014-06-25 11:29:12 +0100238 struct arm_smmu_device *smmu;
Will Deacon518f7132014-11-14 17:17:54 +0000239 struct io_pgtable_ops *pgtbl_ops;
Robin Murphy32b12442017-09-28 15:55:01 +0100240 const struct iommu_gather_ops *tlb_ops;
Will Deacon44680ee2014-06-25 11:29:12 +0100241 struct arm_smmu_cfg cfg;
Will Deaconc752ce42014-06-25 22:46:31 +0100242 enum arm_smmu_domain_stage stage;
Robin Murphy44f68762018-09-20 17:10:27 +0100243 bool non_strict;
Will Deacon518f7132014-11-14 17:17:54 +0000244 struct mutex init_mutex; /* Protects smmu pointer */
Will Deacon8e517e72017-07-06 15:55:48 +0100245 spinlock_t cb_lock; /* Serialises ATS1* ops and TLB syncs */
Joerg Roedel1d672632015-03-26 13:43:10 +0100246 struct iommu_domain domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100247};
248
Robin Murphyaadbf212019-08-15 19:37:29 +0100249static void __iomem *arm_smmu_page(struct arm_smmu_device *smmu, int n)
250{
251 return smmu->base + (n << smmu->pgshift);
252}
253
254static u32 arm_smmu_readl(struct arm_smmu_device *smmu, int page, int offset)
255{
256 return readl_relaxed(arm_smmu_page(smmu, page) + offset);
257}
258
259static void arm_smmu_writel(struct arm_smmu_device *smmu, int page, int offset,
260 u32 val)
261{
262 writel_relaxed(val, arm_smmu_page(smmu, page) + offset);
263}
264
Robin Murphy19713fd2019-08-15 19:37:30 +0100265static u64 arm_smmu_readq(struct arm_smmu_device *smmu, int page, int offset)
266{
267 return readq_relaxed(arm_smmu_page(smmu, page) + offset);
268}
269
270static void arm_smmu_writeq(struct arm_smmu_device *smmu, int page, int offset,
271 u64 val)
272{
273 writeq_relaxed(val, arm_smmu_page(smmu, page) + offset);
274}
275
Robin Murphyaadbf212019-08-15 19:37:29 +0100276#define ARM_SMMU_GR1 1
Robin Murphy19713fd2019-08-15 19:37:30 +0100277#define ARM_SMMU_CB(s, n) ((s)->numpage + (n))
Robin Murphyaadbf212019-08-15 19:37:29 +0100278
279#define arm_smmu_gr1_read(s, o) \
280 arm_smmu_readl((s), ARM_SMMU_GR1, (o))
281#define arm_smmu_gr1_write(s, o, v) \
282 arm_smmu_writel((s), ARM_SMMU_GR1, (o), (v))
283
Robin Murphy19713fd2019-08-15 19:37:30 +0100284#define arm_smmu_cb_read(s, n, o) \
285 arm_smmu_readl((s), ARM_SMMU_CB((s), (n)), (o))
286#define arm_smmu_cb_write(s, n, o, v) \
287 arm_smmu_writel((s), ARM_SMMU_CB((s), (n)), (o), (v))
288#define arm_smmu_cb_readq(s, n, o) \
289 arm_smmu_readq((s), ARM_SMMU_CB((s), (n)), (o))
290#define arm_smmu_cb_writeq(s, n, o, v) \
291 arm_smmu_writeq((s), ARM_SMMU_CB((s), (n)), (o), (v))
292
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000293struct arm_smmu_option_prop {
294 u32 opt;
295 const char *prop;
296};
297
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -0800298static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);
299
Robin Murphy021bb842016-09-14 15:26:46 +0100300static bool using_legacy_binding, using_generic_binding;
301
Mitchel Humpherys29073202014-07-08 09:52:18 -0700302static struct arm_smmu_option_prop arm_smmu_options[] = {
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000303 { ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
304 { 0, NULL},
305};
306
Sricharan Rd4a44f02018-12-04 11:52:10 +0530307static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
308{
309 if (pm_runtime_enabled(smmu->dev))
310 return pm_runtime_get_sync(smmu->dev);
311
312 return 0;
313}
314
315static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
316{
317 if (pm_runtime_enabled(smmu->dev))
318 pm_runtime_put(smmu->dev);
319}
320
Joerg Roedel1d672632015-03-26 13:43:10 +0100321static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
322{
323 return container_of(dom, struct arm_smmu_domain, domain);
324}
325
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000326static void parse_driver_options(struct arm_smmu_device *smmu)
327{
328 int i = 0;
Mitchel Humpherys29073202014-07-08 09:52:18 -0700329
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000330 do {
331 if (of_property_read_bool(smmu->dev->of_node,
332 arm_smmu_options[i].prop)) {
333 smmu->options |= arm_smmu_options[i].opt;
334 dev_notice(smmu->dev, "option %s\n",
335 arm_smmu_options[i].prop);
336 }
337 } while (arm_smmu_options[++i].opt);
338}
339
Will Deacon8f68f8e2014-07-15 11:27:08 +0100340static struct device_node *dev_get_dev_node(struct device *dev)
Will Deacona9a1b0b2014-05-01 18:05:08 +0100341{
342 if (dev_is_pci(dev)) {
343 struct pci_bus *bus = to_pci_dev(dev)->bus;
Mitchel Humpherys29073202014-07-08 09:52:18 -0700344
Will Deacona9a1b0b2014-05-01 18:05:08 +0100345 while (!pci_is_root_bus(bus))
346 bus = bus->parent;
Robin Murphyf80cd882016-09-14 15:21:39 +0100347 return of_node_get(bus->bridge->parent->of_node);
Will Deacona9a1b0b2014-05-01 18:05:08 +0100348 }
349
Robin Murphyf80cd882016-09-14 15:21:39 +0100350 return of_node_get(dev->of_node);
Will Deacona9a1b0b2014-05-01 18:05:08 +0100351}
352
Robin Murphyf80cd882016-09-14 15:21:39 +0100353static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100354{
Robin Murphyf80cd882016-09-14 15:21:39 +0100355 *((__be32 *)data) = cpu_to_be32(alias);
356 return 0; /* Continue walking */
Will Deacon45ae7cf2013-06-24 18:31:25 +0100357}
358
Robin Murphyf80cd882016-09-14 15:21:39 +0100359static int __find_legacy_master_phandle(struct device *dev, void *data)
Will Deacona9a1b0b2014-05-01 18:05:08 +0100360{
Robin Murphyf80cd882016-09-14 15:21:39 +0100361 struct of_phandle_iterator *it = *(void **)data;
362 struct device_node *np = it->node;
363 int err;
Will Deacona9a1b0b2014-05-01 18:05:08 +0100364
Robin Murphyf80cd882016-09-14 15:21:39 +0100365 of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
366 "#stream-id-cells", 0)
367 if (it->node == np) {
368 *(void **)data = dev;
369 return 1;
Olav Haugan3c8766d2014-08-22 17:12:32 -0700370 }
Robin Murphyf80cd882016-09-14 15:21:39 +0100371 it->node = np;
372 return err == -ENOENT ? 0 : err;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100373}
374
Robin Murphyd6fc5d92016-09-12 17:13:52 +0100375static struct platform_driver arm_smmu_driver;
Robin Murphyadfec2e2016-09-12 17:13:55 +0100376static struct iommu_ops arm_smmu_ops;
Robin Murphyd6fc5d92016-09-12 17:13:52 +0100377
Robin Murphyadfec2e2016-09-12 17:13:55 +0100378static int arm_smmu_register_legacy_master(struct device *dev,
379 struct arm_smmu_device **smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100380{
Robin Murphyadfec2e2016-09-12 17:13:55 +0100381 struct device *smmu_dev;
Robin Murphyf80cd882016-09-14 15:21:39 +0100382 struct device_node *np;
383 struct of_phandle_iterator it;
384 void *data = &it;
Robin Murphyadfec2e2016-09-12 17:13:55 +0100385 u32 *sids;
Robin Murphyf80cd882016-09-14 15:21:39 +0100386 __be32 pci_sid;
387 int err;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100388
Robin Murphyf80cd882016-09-14 15:21:39 +0100389 np = dev_get_dev_node(dev);
390 if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
391 of_node_put(np);
392 return -ENODEV;
393 }
394
395 it.node = np;
Robin Murphyd6fc5d92016-09-12 17:13:52 +0100396 err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
397 __find_legacy_master_phandle);
Robin Murphyadfec2e2016-09-12 17:13:55 +0100398 smmu_dev = data;
Robin Murphyf80cd882016-09-14 15:21:39 +0100399 of_node_put(np);
400 if (err == 0)
401 return -ENODEV;
402 if (err < 0)
403 return err;
Will Deacon44680ee2014-06-25 11:29:12 +0100404
Robin Murphyf80cd882016-09-14 15:21:39 +0100405 if (dev_is_pci(dev)) {
406 /* "mmu-masters" assumes Stream ID == Requester ID */
407 pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
408 &pci_sid);
409 it.cur = &pci_sid;
410 it.cur_count = 1;
411 }
412
Robin Murphyadfec2e2016-09-12 17:13:55 +0100413 err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
414 &arm_smmu_ops);
415 if (err)
416 return err;
417
418 sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
419 if (!sids)
Robin Murphyf80cd882016-09-14 15:21:39 +0100420 return -ENOMEM;
421
Robin Murphyadfec2e2016-09-12 17:13:55 +0100422 *smmu = dev_get_drvdata(smmu_dev);
423 of_phandle_iterator_args(&it, sids, it.cur_count);
424 err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
425 kfree(sids);
426 return err;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100427}
428
429static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
430{
431 int idx;
432
433 do {
434 idx = find_next_zero_bit(map, end, start);
435 if (idx == end)
436 return -ENOSPC;
437 } while (test_and_set_bit(idx, map));
438
439 return idx;
440}
441
442static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
443{
444 clear_bit(idx, map);
445}
446
447/* Wait for any pending TLB invalidations to complete */
Robin Murphy19713fd2019-08-15 19:37:30 +0100448static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
449 int sync, int status)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100450{
Robin Murphy8513c892017-03-30 17:56:32 +0100451 unsigned int spin_cnt, delay;
Robin Murphy19713fd2019-08-15 19:37:30 +0100452 u32 reg;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100453
Robin Murphy19713fd2019-08-15 19:37:30 +0100454 arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL);
Robin Murphy8513c892017-03-30 17:56:32 +0100455 for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
456 for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
Robin Murphy19713fd2019-08-15 19:37:30 +0100457 reg = arm_smmu_readl(smmu, page, status);
458 if (!(reg & sTLBGSTATUS_GSACTIVE))
Robin Murphy8513c892017-03-30 17:56:32 +0100459 return;
460 cpu_relax();
Will Deacon45ae7cf2013-06-24 18:31:25 +0100461 }
Robin Murphy8513c892017-03-30 17:56:32 +0100462 udelay(delay);
Will Deacon45ae7cf2013-06-24 18:31:25 +0100463 }
Robin Murphy8513c892017-03-30 17:56:32 +0100464 dev_err_ratelimited(smmu->dev,
465 "TLB sync timed out -- SMMU may be deadlocked\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +0100466}
467
Robin Murphy11febfc2017-03-30 17:56:31 +0100468static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
Will Deacon1463fe42013-07-31 19:21:27 +0100469{
Will Deacon8e517e72017-07-06 15:55:48 +0100470 unsigned long flags;
Robin Murphy11febfc2017-03-30 17:56:31 +0100471
Will Deacon8e517e72017-07-06 15:55:48 +0100472 spin_lock_irqsave(&smmu->global_sync_lock, flags);
Robin Murphy19713fd2019-08-15 19:37:30 +0100473 __arm_smmu_tlb_sync(smmu, 0, ARM_SMMU_GR0_sTLBGSYNC,
474 ARM_SMMU_GR0_sTLBGSTATUS);
Will Deacon8e517e72017-07-06 15:55:48 +0100475 spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
Will Deacon518f7132014-11-14 17:17:54 +0000476}
477
Robin Murphy11febfc2017-03-30 17:56:31 +0100478static void arm_smmu_tlb_sync_context(void *cookie)
Will Deacon1463fe42013-07-31 19:21:27 +0100479{
Will Deacon518f7132014-11-14 17:17:54 +0000480 struct arm_smmu_domain *smmu_domain = cookie;
Robin Murphy11febfc2017-03-30 17:56:31 +0100481 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon8e517e72017-07-06 15:55:48 +0100482 unsigned long flags;
Robin Murphy11febfc2017-03-30 17:56:31 +0100483
Will Deacon8e517e72017-07-06 15:55:48 +0100484 spin_lock_irqsave(&smmu_domain->cb_lock, flags);
Robin Murphy19713fd2019-08-15 19:37:30 +0100485 __arm_smmu_tlb_sync(smmu, ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx),
486 ARM_SMMU_CB_TLBSYNC, ARM_SMMU_CB_TLBSTATUS);
Will Deacon8e517e72017-07-06 15:55:48 +0100487 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
Will Deacon518f7132014-11-14 17:17:54 +0000488}
489
Robin Murphy11febfc2017-03-30 17:56:31 +0100490static void arm_smmu_tlb_sync_vmid(void *cookie)
491{
492 struct arm_smmu_domain *smmu_domain = cookie;
493
494 arm_smmu_tlb_sync_global(smmu_domain->smmu);
495}
496
497static void arm_smmu_tlb_inv_context_s1(void *cookie)
Will Deacon518f7132014-11-14 17:17:54 +0000498{
499 struct arm_smmu_domain *smmu_domain = cookie;
Robin Murphy44f68762018-09-20 17:10:27 +0100500 /*
Robin Murphy19713fd2019-08-15 19:37:30 +0100501 * The TLBI write may be relaxed, so ensure that PTEs cleared by the
502 * current CPU are visible beforehand.
Robin Murphy44f68762018-09-20 17:10:27 +0100503 */
Robin Murphy19713fd2019-08-15 19:37:30 +0100504 wmb();
505 arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx,
506 ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid);
Robin Murphy11febfc2017-03-30 17:56:31 +0100507 arm_smmu_tlb_sync_context(cookie);
508}
509
510static void arm_smmu_tlb_inv_context_s2(void *cookie)
511{
512 struct arm_smmu_domain *smmu_domain = cookie;
Will Deacon44680ee2014-06-25 11:29:12 +0100513 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy11febfc2017-03-30 17:56:31 +0100514 void __iomem *base = ARM_SMMU_GR0(smmu);
Will Deacon1463fe42013-07-31 19:21:27 +0100515
Robin Murphy44f68762018-09-20 17:10:27 +0100516 /* NOTE: see above */
517 writel(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
Robin Murphy11febfc2017-03-30 17:56:31 +0100518 arm_smmu_tlb_sync_global(smmu);
Will Deacon1463fe42013-07-31 19:21:27 +0100519}
520
Robin Murphy71e8a8c2019-08-15 19:37:27 +0100521static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size,
522 size_t granule, bool leaf, void *cookie)
Will Deacon518f7132014-11-14 17:17:54 +0000523{
524 struct arm_smmu_domain *smmu_domain = cookie;
Robin Murphy71e8a8c2019-08-15 19:37:27 +0100525 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon518f7132014-11-14 17:17:54 +0000526 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Robin Murphy19713fd2019-08-15 19:37:30 +0100527 int reg, idx = cfg->cbndx;
Will Deacon518f7132014-11-14 17:17:54 +0000528
Robin Murphy71e8a8c2019-08-15 19:37:27 +0100529 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
Will Deacon7d321bd32018-10-01 12:42:49 +0100530 wmb();
531
Robin Murphy19713fd2019-08-15 19:37:30 +0100532 reg = leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
Will Deacon518f7132014-11-14 17:17:54 +0000533
Robin Murphy71e8a8c2019-08-15 19:37:27 +0100534 if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
535 iova = (iova >> 12) << 12;
536 iova |= cfg->asid;
Robin Murphy75df1382015-12-07 18:18:52 +0000537 do {
Robin Murphy19713fd2019-08-15 19:37:30 +0100538 arm_smmu_cb_write(smmu, idx, reg, iova);
Robin Murphy71e8a8c2019-08-15 19:37:27 +0100539 iova += granule;
540 } while (size -= granule);
541 } else {
542 iova >>= 12;
543 iova |= (u64)cfg->asid << 48;
544 do {
Robin Murphy19713fd2019-08-15 19:37:30 +0100545 arm_smmu_cb_writeq(smmu, idx, reg, iova);
Robin Murphy75df1382015-12-07 18:18:52 +0000546 iova += granule >> 12;
547 } while (size -= granule);
Will Deacon518f7132014-11-14 17:17:54 +0000548 }
549}
550
Robin Murphy71e8a8c2019-08-15 19:37:27 +0100551static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
552 size_t granule, bool leaf, void *cookie)
553{
554 struct arm_smmu_domain *smmu_domain = cookie;
555 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy19713fd2019-08-15 19:37:30 +0100556 int reg, idx = smmu_domain->cfg.cbndx;
Robin Murphy71e8a8c2019-08-15 19:37:27 +0100557
558 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
559 wmb();
560
Robin Murphy19713fd2019-08-15 19:37:30 +0100561 reg = leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L : ARM_SMMU_CB_S2_TLBIIPAS2;
Robin Murphy71e8a8c2019-08-15 19:37:27 +0100562 iova >>= 12;
563 do {
Robin Murphy61005762019-08-15 19:37:28 +0100564 if (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64)
Robin Murphy19713fd2019-08-15 19:37:30 +0100565 arm_smmu_cb_writeq(smmu, idx, reg, iova);
Robin Murphy61005762019-08-15 19:37:28 +0100566 else
Robin Murphy19713fd2019-08-15 19:37:30 +0100567 arm_smmu_cb_write(smmu, idx, reg, iova);
Robin Murphy71e8a8c2019-08-15 19:37:27 +0100568 iova += granule >> 12;
569 } while (size -= granule);
570}
571
Robin Murphy11febfc2017-03-30 17:56:31 +0100572/*
573 * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
574 * almost negligible, but the benefit of getting the first one in as far ahead
575 * of the sync as possible is significant, hence we don't just make this a
576 * no-op and set .tlb_sync to arm_smmu_inv_context_s2() as you might think.
577 */
578static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
579 size_t granule, bool leaf, void *cookie)
580{
581 struct arm_smmu_domain *smmu_domain = cookie;
582 void __iomem *base = ARM_SMMU_GR0(smmu_domain->smmu);
583
Will Deacon7d321bd32018-10-01 12:42:49 +0100584 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
585 wmb();
586
Robin Murphy11febfc2017-03-30 17:56:31 +0100587 writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
588}
589
590static const struct iommu_gather_ops arm_smmu_s1_tlb_ops = {
591 .tlb_flush_all = arm_smmu_tlb_inv_context_s1,
Robin Murphy71e8a8c2019-08-15 19:37:27 +0100592 .tlb_add_flush = arm_smmu_tlb_inv_range_s1,
Robin Murphy11febfc2017-03-30 17:56:31 +0100593 .tlb_sync = arm_smmu_tlb_sync_context,
594};
595
596static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v2 = {
597 .tlb_flush_all = arm_smmu_tlb_inv_context_s2,
Robin Murphy71e8a8c2019-08-15 19:37:27 +0100598 .tlb_add_flush = arm_smmu_tlb_inv_range_s2,
Robin Murphy11febfc2017-03-30 17:56:31 +0100599 .tlb_sync = arm_smmu_tlb_sync_context,
600};
601
602static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v1 = {
603 .tlb_flush_all = arm_smmu_tlb_inv_context_s2,
604 .tlb_add_flush = arm_smmu_tlb_inv_vmid_nosync,
605 .tlb_sync = arm_smmu_tlb_sync_vmid,
Will Deacon518f7132014-11-14 17:17:54 +0000606};
607
Will Deacon45ae7cf2013-06-24 18:31:25 +0100608static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
609{
Vivek Gautambc580b52019-04-22 12:40:36 +0530610 u32 fsr, fsynr, cbfrsynra;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100611 unsigned long iova;
612 struct iommu_domain *domain = dev;
Joerg Roedel1d672632015-03-26 13:43:10 +0100613 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +0100614 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy19713fd2019-08-15 19:37:30 +0100615 int idx = smmu_domain->cfg.cbndx;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100616
Robin Murphy19713fd2019-08-15 19:37:30 +0100617 fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
Will Deacon45ae7cf2013-06-24 18:31:25 +0100618 if (!(fsr & FSR_FAULT))
619 return IRQ_NONE;
620
Robin Murphy19713fd2019-08-15 19:37:30 +0100621 fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0);
622 iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);
623 cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));
Will Deacon45ae7cf2013-06-24 18:31:25 +0100624
Will Deacon3714ce1d2016-08-05 19:49:45 +0100625 dev_err_ratelimited(smmu->dev,
Vivek Gautambc580b52019-04-22 12:40:36 +0530626 "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
Robin Murphy19713fd2019-08-15 19:37:30 +0100627 fsr, iova, fsynr, cbfrsynra, idx);
Will Deacon3714ce1d2016-08-05 19:49:45 +0100628
Robin Murphy19713fd2019-08-15 19:37:30 +0100629 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
Will Deacon3714ce1d2016-08-05 19:49:45 +0100630 return IRQ_HANDLED;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100631}
632
633static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
634{
635 u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
636 struct arm_smmu_device *smmu = dev;
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000637 void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +0100638
639 gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
640 gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
641 gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
642 gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
643
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000644 if (!gfsr)
645 return IRQ_NONE;
646
Will Deacon45ae7cf2013-06-24 18:31:25 +0100647 dev_err_ratelimited(smmu->dev,
648 "Unexpected global fault, this could be serious\n");
649 dev_err_ratelimited(smmu->dev,
650 "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
651 gfsr, gfsynr0, gfsynr1, gfsynr2);
652
653 writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
Will Deaconadaba322013-07-31 19:21:26 +0100654 return IRQ_HANDLED;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100655}
656
Will Deacon518f7132014-11-14 17:17:54 +0000657static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
658 struct io_pgtable_cfg *pgtbl_cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100659{
Will Deacon44680ee2014-06-25 11:29:12 +0100660 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Robin Murphy90df3732017-08-08 14:56:14 +0100661 struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
662 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
663
664 cb->cfg = cfg;
665
Robin Murphy620565a2019-08-15 19:37:25 +0100666 /* TCR */
Robin Murphy90df3732017-08-08 14:56:14 +0100667 if (stage1) {
668 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
669 cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr;
670 } else {
671 cb->tcr[0] = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
672 cb->tcr[1] = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
Robin Murphy620565a2019-08-15 19:37:25 +0100673 cb->tcr[1] |= FIELD_PREP(TCR2_SEP, TCR2_SEP_UPSTREAM);
Robin Murphy90df3732017-08-08 14:56:14 +0100674 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
Robin Murphy620565a2019-08-15 19:37:25 +0100675 cb->tcr[1] |= TCR2_AS;
Robin Murphy90df3732017-08-08 14:56:14 +0100676 }
677 } else {
678 cb->tcr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
679 }
680
681 /* TTBRs */
682 if (stage1) {
683 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
684 cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
685 cb->ttbr[1] = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
686 } else {
687 cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
Robin Murphy620565a2019-08-15 19:37:25 +0100688 cb->ttbr[0] |= FIELD_PREP(TTBRn_ASID, cfg->asid);
Robin Murphy90df3732017-08-08 14:56:14 +0100689 cb->ttbr[1] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
Robin Murphy620565a2019-08-15 19:37:25 +0100690 cb->ttbr[1] |= FIELD_PREP(TTBRn_ASID, cfg->asid);
Robin Murphy90df3732017-08-08 14:56:14 +0100691 }
692 } else {
693 cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
694 }
695
696 /* MAIRs (stage-1 only) */
697 if (stage1) {
698 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
699 cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr;
700 cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr;
701 } else {
702 cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
703 cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
704 }
705 }
706}
707
708static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
709{
710 u32 reg;
711 bool stage1;
712 struct arm_smmu_cb *cb = &smmu->cbs[idx];
713 struct arm_smmu_cfg *cfg = cb->cfg;
Robin Murphy90df3732017-08-08 14:56:14 +0100714
715 /* Unassigned context banks only need disabling */
716 if (!cfg) {
Robin Murphy19713fd2019-08-15 19:37:30 +0100717 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, 0);
Robin Murphy90df3732017-08-08 14:56:14 +0100718 return;
719 }
720
Will Deacon44680ee2014-06-25 11:29:12 +0100721 stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100722
Robin Murphy90df3732017-08-08 14:56:14 +0100723 /* CBA2R */
Will Deacon4a1c93c2015-03-04 12:21:03 +0000724 if (smmu->version > ARM_SMMU_V1) {
Robin Murphy7602b872016-04-28 17:12:09 +0100725 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
Robin Murphy5114e962019-08-15 19:37:24 +0100726 reg = CBA2R_VA64;
Robin Murphy7602b872016-04-28 17:12:09 +0100727 else
Robin Murphy5114e962019-08-15 19:37:24 +0100728 reg = 0;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -0800729 /* 16-bit VMIDs live in CBA2R */
730 if (smmu->features & ARM_SMMU_FEAT_VMID16)
Robin Murphy5114e962019-08-15 19:37:24 +0100731 reg |= FIELD_PREP(CBA2R_VMID16, cfg->vmid);
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -0800732
Robin Murphyaadbf212019-08-15 19:37:29 +0100733 arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBA2R(idx), reg);
Will Deacon4a1c93c2015-03-04 12:21:03 +0000734 }
735
Will Deacon45ae7cf2013-06-24 18:31:25 +0100736 /* CBAR */
Robin Murphy5114e962019-08-15 19:37:24 +0100737 reg = FIELD_PREP(CBAR_TYPE, cfg->cbar);
Robin Murphyb7862e32016-04-13 18:13:03 +0100738 if (smmu->version < ARM_SMMU_V2)
Robin Murphy5114e962019-08-15 19:37:24 +0100739 reg |= FIELD_PREP(CBAR_IRPTNDX, cfg->irptndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +0100740
Will Deacon57ca90f2014-02-06 14:59:05 +0000741 /*
742 * Use the weakest shareability/memory types, so they are
743 * overridden by the ttbcr/pte.
744 */
745 if (stage1) {
Robin Murphy5114e962019-08-15 19:37:24 +0100746 reg |= FIELD_PREP(CBAR_S1_BPSHCFG, CBAR_S1_BPSHCFG_NSH) |
747 FIELD_PREP(CBAR_S1_MEMATTR, CBAR_S1_MEMATTR_WB);
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -0800748 } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
749 /* 8-bit VMIDs live in CBAR */
Robin Murphy5114e962019-08-15 19:37:24 +0100750 reg |= FIELD_PREP(CBAR_VMID, cfg->vmid);
Will Deacon57ca90f2014-02-06 14:59:05 +0000751 }
Robin Murphyaadbf212019-08-15 19:37:29 +0100752 arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(idx), reg);
Will Deacon45ae7cf2013-06-24 18:31:25 +0100753
Sunil Goutham125458a2017-03-28 16:11:12 +0530754 /*
Robin Murphy620565a2019-08-15 19:37:25 +0100755 * TCR
Sunil Goutham125458a2017-03-28 16:11:12 +0530756 * We must write this before the TTBRs, since it determines the
757 * access behaviour of some fields (in particular, ASID[15:8]).
758 */
Robin Murphy90df3732017-08-08 14:56:14 +0100759 if (stage1 && smmu->version > ARM_SMMU_V1)
Robin Murphy19713fd2019-08-15 19:37:30 +0100760 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR2, cb->tcr[1]);
761 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR, cb->tcr[0]);
Will Deacon45ae7cf2013-06-24 18:31:25 +0100762
Will Deacon45ae7cf2013-06-24 18:31:25 +0100763 /* TTBRs */
Robin Murphy90df3732017-08-08 14:56:14 +0100764 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
Robin Murphy19713fd2019-08-15 19:37:30 +0100765 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_CONTEXTIDR, cfg->asid);
766 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
767 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR1, cb->ttbr[1]);
Will Deacon45ae7cf2013-06-24 18:31:25 +0100768 } else {
Robin Murphy19713fd2019-08-15 19:37:30 +0100769 arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
Robin Murphy90df3732017-08-08 14:56:14 +0100770 if (stage1)
Robin Murphy19713fd2019-08-15 19:37:30 +0100771 arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR1,
772 cb->ttbr[1]);
Will Deacon45ae7cf2013-06-24 18:31:25 +0100773 }
774
Will Deacon518f7132014-11-14 17:17:54 +0000775 /* MAIRs (stage-1 only) */
Will Deacon45ae7cf2013-06-24 18:31:25 +0100776 if (stage1) {
Robin Murphy19713fd2019-08-15 19:37:30 +0100777 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR0, cb->mair[0]);
778 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR1, cb->mair[1]);
Will Deacon45ae7cf2013-06-24 18:31:25 +0100779 }
780
Will Deacon45ae7cf2013-06-24 18:31:25 +0100781 /* SCTLR */
Robin Murphy60705292016-08-11 17:44:06 +0100782 reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100783 if (stage1)
784 reg |= SCTLR_S1_ASIDPNE;
Robin Murphy90df3732017-08-08 14:56:14 +0100785 if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
786 reg |= SCTLR_E;
787
Robin Murphy19713fd2019-08-15 19:37:30 +0100788 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
Will Deacon45ae7cf2013-06-24 18:31:25 +0100789}
790
791static int arm_smmu_init_domain_context(struct iommu_domain *domain,
Will Deacon44680ee2014-06-25 11:29:12 +0100792 struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100793{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +0100794 int irq, start, ret = 0;
Will Deacon518f7132014-11-14 17:17:54 +0000795 unsigned long ias, oas;
796 struct io_pgtable_ops *pgtbl_ops;
797 struct io_pgtable_cfg pgtbl_cfg;
798 enum io_pgtable_fmt fmt;
Joerg Roedel1d672632015-03-26 13:43:10 +0100799 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +0100800 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100801
Will Deacon518f7132014-11-14 17:17:54 +0000802 mutex_lock(&smmu_domain->init_mutex);
Mitchel Humpherysa18037b2014-07-30 18:58:13 +0100803 if (smmu_domain->smmu)
804 goto out_unlock;
805
Will Deacon61bc6712017-01-06 16:56:03 +0000806 if (domain->type == IOMMU_DOMAIN_IDENTITY) {
807 smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
808 smmu_domain->smmu = smmu;
809 goto out_unlock;
810 }
811
Will Deaconc752ce42014-06-25 22:46:31 +0100812 /*
813 * Mapping the requested stage onto what we support is surprisingly
814 * complicated, mainly because the spec allows S1+S2 SMMUs without
815 * support for nested translation. That means we end up with the
816 * following table:
817 *
818 * Requested Supported Actual
819 * S1 N S1
820 * S1 S1+S2 S1
821 * S1 S2 S2
822 * S1 S1 S1
823 * N N N
824 * N S1+S2 S2
825 * N S2 S2
826 * N S1 S1
827 *
828 * Note that you can't actually request stage-2 mappings.
829 */
830 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
831 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
832 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
833 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
834
Robin Murphy7602b872016-04-28 17:12:09 +0100835 /*
836 * Choosing a suitable context format is even more fiddly. Until we
837 * grow some way for the caller to express a preference, and/or move
838 * the decision into the io-pgtable code where it arguably belongs,
839 * just aim for the closest thing to the rest of the system, and hope
840 * that the hardware isn't esoteric enough that we can't assume AArch64
841 * support to be a superset of AArch32 support...
842 */
843 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
844 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
Robin Murphy60705292016-08-11 17:44:06 +0100845 if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
846 !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
847 (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
848 (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
849 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
Robin Murphy7602b872016-04-28 17:12:09 +0100850 if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
851 (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
852 ARM_SMMU_FEAT_FMT_AARCH64_16K |
853 ARM_SMMU_FEAT_FMT_AARCH64_4K)))
854 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
855
856 if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
857 ret = -EINVAL;
858 goto out_unlock;
859 }
860
Will Deaconc752ce42014-06-25 22:46:31 +0100861 switch (smmu_domain->stage) {
862 case ARM_SMMU_DOMAIN_S1:
863 cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
864 start = smmu->num_s2_context_banks;
Will Deacon518f7132014-11-14 17:17:54 +0000865 ias = smmu->va_size;
866 oas = smmu->ipa_size;
Robin Murphy7602b872016-04-28 17:12:09 +0100867 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +0000868 fmt = ARM_64_LPAE_S1;
Robin Murphy60705292016-08-11 17:44:06 +0100869 } else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
Will Deacon518f7132014-11-14 17:17:54 +0000870 fmt = ARM_32_LPAE_S1;
Robin Murphy7602b872016-04-28 17:12:09 +0100871 ias = min(ias, 32UL);
872 oas = min(oas, 40UL);
Robin Murphy60705292016-08-11 17:44:06 +0100873 } else {
874 fmt = ARM_V7S;
875 ias = min(ias, 32UL);
876 oas = min(oas, 32UL);
Robin Murphy7602b872016-04-28 17:12:09 +0100877 }
Robin Murphy32b12442017-09-28 15:55:01 +0100878 smmu_domain->tlb_ops = &arm_smmu_s1_tlb_ops;
Will Deaconc752ce42014-06-25 22:46:31 +0100879 break;
880 case ARM_SMMU_DOMAIN_NESTED:
Will Deacon45ae7cf2013-06-24 18:31:25 +0100881 /*
882 * We will likely want to change this if/when KVM gets
883 * involved.
884 */
Will Deaconc752ce42014-06-25 22:46:31 +0100885 case ARM_SMMU_DOMAIN_S2:
Will Deacon9c5c92e2014-06-25 12:12:41 +0100886 cfg->cbar = CBAR_TYPE_S2_TRANS;
887 start = 0;
Will Deacon518f7132014-11-14 17:17:54 +0000888 ias = smmu->ipa_size;
889 oas = smmu->pa_size;
Robin Murphy7602b872016-04-28 17:12:09 +0100890 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +0000891 fmt = ARM_64_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +0100892 } else {
Will Deacon518f7132014-11-14 17:17:54 +0000893 fmt = ARM_32_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +0100894 ias = min(ias, 40UL);
895 oas = min(oas, 40UL);
896 }
Robin Murphy11febfc2017-03-30 17:56:31 +0100897 if (smmu->version == ARM_SMMU_V2)
Robin Murphy32b12442017-09-28 15:55:01 +0100898 smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v2;
Robin Murphy11febfc2017-03-30 17:56:31 +0100899 else
Robin Murphy32b12442017-09-28 15:55:01 +0100900 smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v1;
Will Deaconc752ce42014-06-25 22:46:31 +0100901 break;
902 default:
903 ret = -EINVAL;
904 goto out_unlock;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100905 }
Will Deacon45ae7cf2013-06-24 18:31:25 +0100906 ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
907 smmu->num_context_banks);
Arnd Bergmann287980e2016-05-27 23:23:25 +0200908 if (ret < 0)
Mitchel Humpherysa18037b2014-07-30 18:58:13 +0100909 goto out_unlock;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100910
Will Deacon44680ee2014-06-25 11:29:12 +0100911 cfg->cbndx = ret;
Robin Murphyb7862e32016-04-13 18:13:03 +0100912 if (smmu->version < ARM_SMMU_V2) {
Will Deacon44680ee2014-06-25 11:29:12 +0100913 cfg->irptndx = atomic_inc_return(&smmu->irptndx);
914 cfg->irptndx %= smmu->num_context_irqs;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100915 } else {
Will Deacon44680ee2014-06-25 11:29:12 +0100916 cfg->irptndx = cfg->cbndx;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100917 }
918
Robin Murphy280b6832017-03-30 17:56:29 +0100919 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
920 cfg->vmid = cfg->cbndx + 1 + smmu->cavium_id_base;
921 else
922 cfg->asid = cfg->cbndx + smmu->cavium_id_base;
923
Will Deacon518f7132014-11-14 17:17:54 +0000924 pgtbl_cfg = (struct io_pgtable_cfg) {
Robin Murphyd5466352016-05-09 17:20:09 +0100925 .pgsize_bitmap = smmu->pgsize_bitmap,
Will Deacon518f7132014-11-14 17:17:54 +0000926 .ias = ias,
927 .oas = oas,
Will Deacon4f418452019-06-25 12:51:25 +0100928 .coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK,
Robin Murphy32b12442017-09-28 15:55:01 +0100929 .tlb = smmu_domain->tlb_ops,
Robin Murphy2df7a252015-07-29 19:46:06 +0100930 .iommu_dev = smmu->dev,
Will Deacon518f7132014-11-14 17:17:54 +0000931 };
Mitchel Humpherysa18037b2014-07-30 18:58:13 +0100932
Robin Murphy44f68762018-09-20 17:10:27 +0100933 if (smmu_domain->non_strict)
934 pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
935
Will Deacon518f7132014-11-14 17:17:54 +0000936 smmu_domain->smmu = smmu;
937 pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
938 if (!pgtbl_ops) {
939 ret = -ENOMEM;
940 goto out_clear_smmu;
941 }
942
Robin Murphyd5466352016-05-09 17:20:09 +0100943 /* Update the domain's page sizes to reflect the page table format */
944 domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
Robin Murphy455eb7d2016-09-12 17:13:58 +0100945 domain->geometry.aperture_end = (1UL << ias) - 1;
946 domain->geometry.force_aperture = true;
Will Deacon518f7132014-11-14 17:17:54 +0000947
948 /* Initialise the context bank with our page table cfg */
949 arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
Robin Murphy90df3732017-08-08 14:56:14 +0100950 arm_smmu_write_context_bank(smmu, cfg->cbndx);
Will Deacon518f7132014-11-14 17:17:54 +0000951
952 /*
953 * Request context fault interrupt. Do this last to avoid the
954 * handler seeing a half-initialised domain state.
955 */
Will Deacon44680ee2014-06-25 11:29:12 +0100956 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
Peng Fanbee14002016-07-04 17:38:22 +0800957 ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
958 IRQF_SHARED, "arm-smmu-context-fault", domain);
Arnd Bergmann287980e2016-05-27 23:23:25 +0200959 if (ret < 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +0100960 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
Will Deacon44680ee2014-06-25 11:29:12 +0100961 cfg->irptndx, irq);
962 cfg->irptndx = INVALID_IRPTNDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100963 }
964
Will Deacon518f7132014-11-14 17:17:54 +0000965 mutex_unlock(&smmu_domain->init_mutex);
966
967 /* Publish page table ops for map/unmap */
968 smmu_domain->pgtbl_ops = pgtbl_ops;
Will Deacona9a1b0b2014-05-01 18:05:08 +0100969 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100970
Will Deacon518f7132014-11-14 17:17:54 +0000971out_clear_smmu:
972 smmu_domain->smmu = NULL;
Mitchel Humpherysa18037b2014-07-30 18:58:13 +0100973out_unlock:
Will Deacon518f7132014-11-14 17:17:54 +0000974 mutex_unlock(&smmu_domain->init_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +0100975 return ret;
976}
977
978static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
979{
Joerg Roedel1d672632015-03-26 13:43:10 +0100980 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +0100981 struct arm_smmu_device *smmu = smmu_domain->smmu;
982 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Sricharan Rd4a44f02018-12-04 11:52:10 +0530983 int ret, irq;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100984
Will Deacon61bc6712017-01-06 16:56:03 +0000985 if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100986 return;
987
Sricharan Rd4a44f02018-12-04 11:52:10 +0530988 ret = arm_smmu_rpm_get(smmu);
989 if (ret < 0)
990 return;
991
Will Deacon518f7132014-11-14 17:17:54 +0000992 /*
993 * Disable the context bank and free the page tables before freeing
994 * it.
995 */
Robin Murphy90df3732017-08-08 14:56:14 +0100996 smmu->cbs[cfg->cbndx].cfg = NULL;
997 arm_smmu_write_context_bank(smmu, cfg->cbndx);
Will Deacon1463fe42013-07-31 19:21:27 +0100998
Will Deacon44680ee2014-06-25 11:29:12 +0100999 if (cfg->irptndx != INVALID_IRPTNDX) {
1000 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
Peng Fanbee14002016-07-04 17:38:22 +08001001 devm_free_irq(smmu->dev, irq, domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001002 }
1003
Markus Elfring44830b02015-11-06 18:32:41 +01001004 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Will Deacon44680ee2014-06-25 11:29:12 +01001005 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301006
1007 arm_smmu_rpm_put(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001008}
1009
Joerg Roedel1d672632015-03-26 13:43:10 +01001010static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001011{
1012 struct arm_smmu_domain *smmu_domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001013
Will Deacon61bc6712017-01-06 16:56:03 +00001014 if (type != IOMMU_DOMAIN_UNMANAGED &&
1015 type != IOMMU_DOMAIN_DMA &&
1016 type != IOMMU_DOMAIN_IDENTITY)
Joerg Roedel1d672632015-03-26 13:43:10 +01001017 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001018 /*
1019 * Allocate the domain and initialise some of its data structures.
1020 * We can't really do anything meaningful until we've added a
1021 * master.
1022 */
1023 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
1024 if (!smmu_domain)
Joerg Roedel1d672632015-03-26 13:43:10 +01001025 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001026
Robin Murphy021bb842016-09-14 15:26:46 +01001027 if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
1028 iommu_get_dma_cookie(&smmu_domain->domain))) {
Robin Murphy9adb9592016-01-26 18:06:36 +00001029 kfree(smmu_domain);
1030 return NULL;
1031 }
1032
Will Deacon518f7132014-11-14 17:17:54 +00001033 mutex_init(&smmu_domain->init_mutex);
Robin Murphy523d7422017-06-22 16:53:56 +01001034 spin_lock_init(&smmu_domain->cb_lock);
Joerg Roedel1d672632015-03-26 13:43:10 +01001035
1036 return &smmu_domain->domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001037}
1038
Joerg Roedel1d672632015-03-26 13:43:10 +01001039static void arm_smmu_domain_free(struct iommu_domain *domain)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001040{
Joerg Roedel1d672632015-03-26 13:43:10 +01001041 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon1463fe42013-07-31 19:21:27 +01001042
1043 /*
1044 * Free the domain resources. We assume that all devices have
1045 * already been detached.
1046 */
Robin Murphy9adb9592016-01-26 18:06:36 +00001047 iommu_put_dma_cookie(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001048 arm_smmu_destroy_domain_context(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001049 kfree(smmu_domain);
1050}
1051
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001052static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
1053{
1054 struct arm_smmu_smr *smr = smmu->smrs + idx;
Robin Murphy0caf5f42019-08-15 19:37:23 +01001055 u32 reg = FIELD_PREP(SMR_ID, smr->id) | FIELD_PREP(SMR_MASK, smr->mask);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001056
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001057 if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001058 reg |= SMR_VALID;
1059 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
1060}
1061
Robin Murphy8e8b2032016-09-12 17:13:50 +01001062static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
1063{
1064 struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
Robin Murphy0caf5f42019-08-15 19:37:23 +01001065 u32 reg = FIELD_PREP(S2CR_TYPE, s2cr->type) |
1066 FIELD_PREP(S2CR_CBNDX, s2cr->cbndx) |
1067 FIELD_PREP(S2CR_PRIVCFG, s2cr->privcfg);
Robin Murphy8e8b2032016-09-12 17:13:50 +01001068
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001069 if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
1070 smmu->smrs[idx].valid)
1071 reg |= S2CR_EXIDVALID;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001072 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
1073}
1074
1075static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
1076{
1077 arm_smmu_write_s2cr(smmu, idx);
1078 if (smmu->smrs)
1079 arm_smmu_write_smr(smmu, idx);
1080}
1081
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001082/*
1083 * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
1084 * should be called after sCR0 is written.
1085 */
1086static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
1087{
1088 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1089 u32 smr;
1090
1091 if (!smmu->smrs)
1092 return;
1093
1094 /*
1095 * SMR.ID bits may not be preserved if the corresponding MASK
1096 * bits are set, so check each one separately. We can reject
1097 * masters later if they try to claim IDs outside these masks.
1098 */
Robin Murphy0caf5f42019-08-15 19:37:23 +01001099 smr = FIELD_PREP(SMR_ID, smmu->streamid_mask);
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001100 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
1101 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
Robin Murphy0caf5f42019-08-15 19:37:23 +01001102 smmu->streamid_mask = FIELD_GET(SMR_ID, smr);
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001103
Robin Murphy0caf5f42019-08-15 19:37:23 +01001104 smr = FIELD_PREP(SMR_MASK, smmu->streamid_mask);
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001105 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
1106 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
Robin Murphy0caf5f42019-08-15 19:37:23 +01001107 smmu->smr_mask_mask = FIELD_GET(SMR_MASK, smr);
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001108}
1109
Robin Murphy588888a2016-09-12 17:13:54 +01001110static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001111{
1112 struct arm_smmu_smr *smrs = smmu->smrs;
Robin Murphy588888a2016-09-12 17:13:54 +01001113 int i, free_idx = -ENOSPC;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001114
Robin Murphy588888a2016-09-12 17:13:54 +01001115 /* Stream indexing is blissfully easy */
1116 if (!smrs)
1117 return id;
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001118
Robin Murphy588888a2016-09-12 17:13:54 +01001119 /* Validating SMRs is... less so */
1120 for (i = 0; i < smmu->num_mapping_groups; ++i) {
1121 if (!smrs[i].valid) {
1122 /*
1123 * Note the first free entry we come across, which
1124 * we'll claim in the end if nothing else matches.
1125 */
1126 if (free_idx < 0)
1127 free_idx = i;
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001128 continue;
1129 }
Robin Murphy588888a2016-09-12 17:13:54 +01001130 /*
1131 * If the new entry is _entirely_ matched by an existing entry,
1132 * then reuse that, with the guarantee that there also cannot
1133 * be any subsequent conflicting entries. In normal use we'd
1134 * expect simply identical entries for this case, but there's
1135 * no harm in accommodating the generalisation.
1136 */
1137 if ((mask & smrs[i].mask) == mask &&
1138 !((id ^ smrs[i].id) & ~smrs[i].mask))
1139 return i;
1140 /*
1141 * If the new entry has any other overlap with an existing one,
1142 * though, then there always exists at least one stream ID
1143 * which would cause a conflict, and we can't allow that risk.
1144 */
1145 if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
1146 return -EINVAL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001147 }
1148
Robin Murphy588888a2016-09-12 17:13:54 +01001149 return free_idx;
1150}
1151
1152static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
1153{
1154 if (--smmu->s2crs[idx].count)
1155 return false;
1156
1157 smmu->s2crs[idx] = s2cr_init_val;
1158 if (smmu->smrs)
1159 smmu->smrs[idx].valid = false;
1160
1161 return true;
1162}
1163
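/*
 * Claim stream map entries for every stream ID in the master's fwspec, under
 * stream_map_mutex. The hardware is only programmed once every entry has been
 * allocated and the IOMMU group resolved; on failure, any references taken so
 * far are rolled back.
 */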
1164static int arm_smmu_master_alloc_smes(struct device *dev)
1165{
Joerg Roedel9b468f72018-11-29 14:01:00 +01001166 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001167 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy588888a2016-09-12 17:13:54 +01001168 struct arm_smmu_device *smmu = cfg->smmu;
1169 struct arm_smmu_smr *smrs = smmu->smrs;
1170 struct iommu_group *group;
1171 int i, idx, ret;
1172
1173 mutex_lock(&smmu->stream_map_mutex);
1174 /* Figure out a viable stream map entry allocation */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001175 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy0caf5f42019-08-15 19:37:23 +01001176 u16 sid = FIELD_GET(SMR_ID, fwspec->ids[i]);
1177 u16 mask = FIELD_GET(SMR_MASK, fwspec->ids[i]);
Robin Murphy021bb842016-09-14 15:26:46 +01001178
Robin Murphy588888a2016-09-12 17:13:54 +01001179 if (idx != INVALID_SMENDX) {
1180 ret = -EEXIST;
1181 goto out_err;
1182 }
1183
Robin Murphy021bb842016-09-14 15:26:46 +01001184 ret = arm_smmu_find_sme(smmu, sid, mask);
Robin Murphy588888a2016-09-12 17:13:54 +01001185 if (ret < 0)
1186 goto out_err;
1187
1188 idx = ret;
1189 if (smrs && smmu->s2crs[idx].count == 0) {
Robin Murphy021bb842016-09-14 15:26:46 +01001190 smrs[idx].id = sid;
1191 smrs[idx].mask = mask;
Robin Murphy588888a2016-09-12 17:13:54 +01001192 smrs[idx].valid = true;
1193 }
1194 smmu->s2crs[idx].count++;
1195 cfg->smendx[i] = (s16)idx;
1196 }
1197
1198 group = iommu_group_get_for_dev(dev);
1199 if (!group)
1200 group = ERR_PTR(-ENOMEM);
1201 if (IS_ERR(group)) {
1202 ret = PTR_ERR(group);
1203 goto out_err;
1204 }
1205 iommu_group_put(group);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001206
Will Deacon45ae7cf2013-06-24 18:31:25 +01001207 /* It worked! Now, poke the actual hardware */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001208 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy588888a2016-09-12 17:13:54 +01001209 arm_smmu_write_sme(smmu, idx);
1210 smmu->s2crs[idx].group = group;
1211 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001212
Robin Murphy588888a2016-09-12 17:13:54 +01001213 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001214 return 0;
1215
Robin Murphy588888a2016-09-12 17:13:54 +01001216out_err:
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001217 while (i--) {
Robin Murphy588888a2016-09-12 17:13:54 +01001218 arm_smmu_free_sme(smmu, cfg->smendx[i]);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001219 cfg->smendx[i] = INVALID_SMENDX;
1220 }
Robin Murphy588888a2016-09-12 17:13:54 +01001221 mutex_unlock(&smmu->stream_map_mutex);
1222 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001223}
1224
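/*
 * Release the master's stream map entries, writing back any entry whose last
 * reference has gone so that it returns to its default state in hardware.
 */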
Robin Murphyadfec2e2016-09-12 17:13:55 +01001225static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001226{
Robin Murphyadfec2e2016-09-12 17:13:55 +01001227 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
1228 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphyd3097e32016-09-12 17:13:53 +01001229 int i, idx;
Will Deacon43b412b2014-07-15 11:22:24 +01001230
Robin Murphy588888a2016-09-12 17:13:54 +01001231 mutex_lock(&smmu->stream_map_mutex);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001232 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy588888a2016-09-12 17:13:54 +01001233 if (arm_smmu_free_sme(smmu, idx))
1234 arm_smmu_write_sme(smmu, idx);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001235 cfg->smendx[i] = INVALID_SMENDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001236 }
Robin Murphy588888a2016-09-12 17:13:54 +01001237 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001238}
1239
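/*
 * Route the master's stream map entries to the domain: bypass for an identity
 * domain, otherwise a translation targeting the domain's context bank.
 * Entries which already match the required configuration are left untouched.
 */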
Will Deacon45ae7cf2013-06-24 18:31:25 +01001240static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Robin Murphyadfec2e2016-09-12 17:13:55 +01001241 struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001242{
Will Deacon44680ee2014-06-25 11:29:12 +01001243 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001244 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001245 u8 cbndx = smmu_domain->cfg.cbndx;
Will Deacon61bc6712017-01-06 16:56:03 +00001246 enum arm_smmu_s2cr_type type;
Robin Murphy588888a2016-09-12 17:13:54 +01001247 int i, idx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001248
Will Deacon61bc6712017-01-06 16:56:03 +00001249 if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
1250 type = S2CR_TYPE_BYPASS;
1251 else
1252 type = S2CR_TYPE_TRANS;
1253
Robin Murphyadfec2e2016-09-12 17:13:55 +01001254 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy8e8b2032016-09-12 17:13:50 +01001255 if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
Robin Murphy588888a2016-09-12 17:13:54 +01001256 continue;
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001257
Robin Murphy8e8b2032016-09-12 17:13:50 +01001258 s2cr[idx].type = type;
Sricharan Re1989802017-01-06 18:58:15 +05301259 s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001260 s2cr[idx].cbndx = cbndx;
1261 arm_smmu_write_s2cr(smmu, idx);
Will Deacon43b412b2014-07-15 11:22:24 +01001262 }
Robin Murphy8e8b2032016-09-12 17:13:50 +01001263 return 0;
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001264}
1265
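/*
 * Attach a master to a domain: resolve its SMMU from the fwspec, take a
 * runtime PM reference, finalise the domain against this SMMU on first use,
 * then point the master's stream map entries at the domain's context bank.
 */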
Will Deacon45ae7cf2013-06-24 18:31:25 +01001266static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1267{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001268 int ret;
Joerg Roedel9b468f72018-11-29 14:01:00 +01001269 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001270 struct arm_smmu_device *smmu;
Joerg Roedel1d672632015-03-26 13:43:10 +01001271 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001272
Robin Murphyadfec2e2016-09-12 17:13:55 +01001273 if (!fwspec || fwspec->ops != &arm_smmu_ops) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001274 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
1275 return -ENXIO;
1276 }
1277
Robin Murphyfba4f8e2016-10-17 12:06:21 +01001278 /*
1279 * FIXME: The arch/arm DMA API code tries to attach devices to its own
1280 * domains between of_xlate() and add_device() - we have no way to cope
1281 * with that, so until ARM gets converted to rely on groups and default
1282 * domains, just say no (but more politely than by dereferencing NULL).
1283 * This should be at least a WARN_ON once that's sorted.
1284 */
1285 if (!fwspec->iommu_priv)
1286 return -ENODEV;
1287
Robin Murphyadfec2e2016-09-12 17:13:55 +01001288 smmu = fwspec_smmu(fwspec);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301289
1290 ret = arm_smmu_rpm_get(smmu);
1291 if (ret < 0)
1292 return ret;
1293
Will Deacon518f7132014-11-14 17:17:54 +00001294 /* Ensure that the domain is finalised */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001295 ret = arm_smmu_init_domain_context(domain, smmu);
Arnd Bergmann287980e2016-05-27 23:23:25 +02001296 if (ret < 0)
Sricharan Rd4a44f02018-12-04 11:52:10 +05301297 goto rpm_put;
Will Deacon518f7132014-11-14 17:17:54 +00001298
Will Deacon45ae7cf2013-06-24 18:31:25 +01001299 /*
Will Deacon44680ee2014-06-25 11:29:12 +01001300 * Sanity check the domain. We don't support domains across
1301 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01001302 */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001303 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001304 dev_err(dev,
1305 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Robin Murphyadfec2e2016-09-12 17:13:55 +01001306 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
Sricharan Rd4a44f02018-12-04 11:52:10 +05301307 ret = -EINVAL;
1308 goto rpm_put;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001309 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001310
1311 /* Looks ok, so add the device to the domain */
Sricharan Rd4a44f02018-12-04 11:52:10 +05301312 ret = arm_smmu_domain_add_master(smmu_domain, fwspec);
1313
1314rpm_put:
1315 arm_smmu_rpm_put(smmu);
1316 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001317}
1318
Will Deacon45ae7cf2013-06-24 18:31:25 +01001319static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00001320 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001321{
Robin Murphy523d7422017-06-22 16:53:56 +01001322 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301323 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1324 int ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001325
Will Deacon518f7132014-11-14 17:17:54 +00001326 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001327 return -ENODEV;
1328
Sricharan Rd4a44f02018-12-04 11:52:10 +05301329 arm_smmu_rpm_get(smmu);
1330 ret = ops->map(ops, iova, paddr, size, prot);
1331 arm_smmu_rpm_put(smmu);
1332
1333 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001334}
1335
1336static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
1337 size_t size)
1338{
Robin Murphy523d7422017-06-22 16:53:56 +01001339 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301340 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1341 size_t ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001342
Will Deacon518f7132014-11-14 17:17:54 +00001343 if (!ops)
1344 return 0;
1345
Sricharan Rd4a44f02018-12-04 11:52:10 +05301346 arm_smmu_rpm_get(smmu);
1347 ret = ops->unmap(ops, iova, size);
1348 arm_smmu_rpm_put(smmu);
1349
1350 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001351}
1352
Robin Murphy44f68762018-09-20 17:10:27 +01001353static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
1354{
1355 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301356 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy44f68762018-09-20 17:10:27 +01001357
Sricharan Rd4a44f02018-12-04 11:52:10 +05301358 if (smmu_domain->tlb_ops) {
1359 arm_smmu_rpm_get(smmu);
Robin Murphy44f68762018-09-20 17:10:27 +01001360 smmu_domain->tlb_ops->tlb_flush_all(smmu_domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301361 arm_smmu_rpm_put(smmu);
1362 }
Robin Murphy44f68762018-09-20 17:10:27 +01001363}
1364
Robin Murphy32b12442017-09-28 15:55:01 +01001365static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
1366{
1367 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301368 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy32b12442017-09-28 15:55:01 +01001369
Sricharan Rd4a44f02018-12-04 11:52:10 +05301370 if (smmu_domain->tlb_ops) {
1371 arm_smmu_rpm_get(smmu);
Robin Murphy32b12442017-09-28 15:55:01 +01001372 smmu_domain->tlb_ops->tlb_sync(smmu_domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301373 arm_smmu_rpm_put(smmu);
1374 }
Robin Murphy32b12442017-09-28 15:55:01 +01001375}
1376
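/*
 * Translate an IOVA using the hardware: write the address to ATS1PR, poll
 * ATSR until the operation completes, then read the result from PAR. Falls
 * back to a software page-table walk on timeout and returns 0 on a
 * translation fault.
 */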
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001377static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
1378 dma_addr_t iova)
1379{
Joerg Roedel1d672632015-03-26 13:43:10 +01001380 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001381 struct arm_smmu_device *smmu = smmu_domain->smmu;
1382 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1383	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1384 struct device *dev = smmu->dev;
Robin Murphy19713fd2019-08-15 19:37:30 +01001385 void __iomem *reg;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001386 u32 tmp;
1387 u64 phys;
Robin Murphy523d7422017-06-22 16:53:56 +01001388 unsigned long va, flags;
Robin Murphy19713fd2019-08-15 19:37:30 +01001389 int ret, idx = cfg->cbndx;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301390
1391 ret = arm_smmu_rpm_get(smmu);
1392 if (ret < 0)
1393 return 0;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001394
Robin Murphy523d7422017-06-22 16:53:56 +01001395 spin_lock_irqsave(&smmu_domain->cb_lock, flags);
Robin Murphy661d9622015-05-27 17:09:34 +01001396 va = iova & ~0xfffUL;
Robin Murphy61005762019-08-15 19:37:28 +01001397 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
Robin Murphy19713fd2019-08-15 19:37:30 +01001398 arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
Robin Murphy61005762019-08-15 19:37:28 +01001399 else
Robin Murphy19713fd2019-08-15 19:37:30 +01001400 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001401
Robin Murphy19713fd2019-08-15 19:37:30 +01001402 reg = arm_smmu_page(smmu, ARM_SMMU_CB(smmu, idx)) + ARM_SMMU_CB_ATSR;
1403 if (readl_poll_timeout_atomic(reg, tmp, !(tmp & ATSR_ACTIVE), 5, 50)) {
Robin Murphy523d7422017-06-22 16:53:56 +01001404 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001405 dev_err(dev,
Fabio Estevam077124c2015-08-18 17:12:24 +01001406 "iova to phys timed out on %pad. Falling back to software table walk.\n",
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001407 &iova);
1408 return ops->iova_to_phys(ops, iova);
1409 }
1410
Robin Murphy19713fd2019-08-15 19:37:30 +01001411 phys = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_PAR);
Robin Murphy523d7422017-06-22 16:53:56 +01001412 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001413 if (phys & CB_PAR_F) {
1414 dev_err(dev, "translation fault!\n");
1415 dev_err(dev, "PAR = 0x%llx\n", phys);
1416 return 0;
1417 }
1418
Sricharan Rd4a44f02018-12-04 11:52:10 +05301419 arm_smmu_rpm_put(smmu);
1420
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001421 return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
1422}
1423
Will Deacon45ae7cf2013-06-24 18:31:25 +01001424static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001425 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001426{
Joerg Roedel1d672632015-03-26 13:43:10 +01001427 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Robin Murphy523d7422017-06-22 16:53:56 +01001428 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001429
Sunil Gouthambdf95922017-04-25 15:27:52 +05301430 if (domain->type == IOMMU_DOMAIN_IDENTITY)
1431 return iova;
1432
Will Deacon518f7132014-11-14 17:17:54 +00001433 if (!ops)
Will Deacona44a97912013-11-07 18:47:50 +00001434 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001435
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001436 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
Robin Murphy523d7422017-06-22 16:53:56 +01001437 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
1438 return arm_smmu_iova_to_phys_hard(domain, iova);
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001439
Robin Murphy523d7422017-06-22 16:53:56 +01001440 return ops->iova_to_phys(ops, iova);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001441}
1442
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001443static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001444{
Will Deacond0948942014-06-24 17:30:10 +01001445 switch (cap) {
1446 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001447 /*
1448 * Return true here as the SMMU can always send out coherent
1449 * requests.
1450 */
1451 return true;
Antonios Motakis0029a8d2014-10-13 14:06:18 +01001452 case IOMMU_CAP_NOEXEC:
1453 return true;
Will Deacond0948942014-06-24 17:30:10 +01001454 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001455 return false;
Will Deacond0948942014-06-24 17:30:10 +01001456 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001457}
Will Deacon45ae7cf2013-06-24 18:31:25 +01001458
Suzuki K Poulose92ce7e82019-06-14 18:54:00 +01001459static int arm_smmu_match_node(struct device *dev, const void *data)
Robin Murphy021bb842016-09-14 15:26:46 +01001460{
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001461 return dev->fwnode == data;
Robin Murphy021bb842016-09-14 15:26:46 +01001462}
1463
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001464static
1465struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
Robin Murphy021bb842016-09-14 15:26:46 +01001466{
1467 struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001468 fwnode, arm_smmu_match_node);
Robin Murphy021bb842016-09-14 15:26:46 +01001469 put_device(dev);
1470 return dev ? dev_get_drvdata(dev) : NULL;
1471}
1472
Will Deacon03edb222015-01-19 14:27:33 +00001473static int arm_smmu_add_device(struct device *dev)
1474{
Robin Murphyadfec2e2016-09-12 17:13:55 +01001475 struct arm_smmu_device *smmu;
Robin Murphyf80cd882016-09-14 15:21:39 +01001476 struct arm_smmu_master_cfg *cfg;
Joerg Roedel9b468f72018-11-29 14:01:00 +01001477 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphyf80cd882016-09-14 15:21:39 +01001478 int i, ret;
1479
Robin Murphy021bb842016-09-14 15:26:46 +01001480 if (using_legacy_binding) {
1481 ret = arm_smmu_register_legacy_master(dev, &smmu);
Artem Savkova7990c62017-08-08 12:26:02 +02001482
1483 /*
1484		 * If dev->iommu_fwspec is initially NULL, arm_smmu_register_legacy_master()

1485 * will allocate/initialise a new one. Thus we need to update fwspec for
1486 * later use.
1487 */
Joerg Roedel9b468f72018-11-29 14:01:00 +01001488 fwspec = dev_iommu_fwspec_get(dev);
Robin Murphy021bb842016-09-14 15:26:46 +01001489 if (ret)
1490 goto out_free;
Robin Murphy3c117b52016-11-02 17:31:32 +00001491 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001492 smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
Robin Murphy021bb842016-09-14 15:26:46 +01001493 } else {
1494 return -ENODEV;
1495 }
Robin Murphyf80cd882016-09-14 15:21:39 +01001496
1497 ret = -EINVAL;
Robin Murphyadfec2e2016-09-12 17:13:55 +01001498 for (i = 0; i < fwspec->num_ids; i++) {
Robin Murphy0caf5f42019-08-15 19:37:23 +01001499 u16 sid = FIELD_GET(SMR_ID, fwspec->ids[i]);
1500 u16 mask = FIELD_GET(SMR_MASK, fwspec->ids[i]);
Robin Murphyf80cd882016-09-14 15:21:39 +01001501
Robin Murphyadfec2e2016-09-12 17:13:55 +01001502 if (sid & ~smmu->streamid_mask) {
Robin Murphyf80cd882016-09-14 15:21:39 +01001503 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
Robin Murphy021bb842016-09-14 15:26:46 +01001504 sid, smmu->streamid_mask);
1505 goto out_free;
1506 }
1507 if (mask & ~smmu->smr_mask_mask) {
1508 dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
Peng Fan6323f472017-04-21 17:03:36 +08001509 mask, smmu->smr_mask_mask);
Robin Murphyf80cd882016-09-14 15:21:39 +01001510 goto out_free;
1511 }
Robin Murphyf80cd882016-09-14 15:21:39 +01001512 }
Will Deacon03edb222015-01-19 14:27:33 +00001513
Robin Murphyadfec2e2016-09-12 17:13:55 +01001514 ret = -ENOMEM;
1515 cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
1516 GFP_KERNEL);
1517 if (!cfg)
1518 goto out_free;
1519
1520 cfg->smmu = smmu;
1521 fwspec->iommu_priv = cfg;
1522 while (i--)
1523 cfg->smendx[i] = INVALID_SMENDX;
1524
Sricharan Rd4a44f02018-12-04 11:52:10 +05301525 ret = arm_smmu_rpm_get(smmu);
1526 if (ret < 0)
1527 goto out_cfg_free;
1528
Robin Murphy588888a2016-09-12 17:13:54 +01001529 ret = arm_smmu_master_alloc_smes(dev);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301530 arm_smmu_rpm_put(smmu);
1531
Robin Murphyadfec2e2016-09-12 17:13:55 +01001532 if (ret)
Vivek Gautamc54451a2017-07-06 15:07:00 +05301533 goto out_cfg_free;
Robin Murphyadfec2e2016-09-12 17:13:55 +01001534
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001535 iommu_device_link(&smmu->iommu, dev);
1536
Sricharan R655e3642018-12-04 11:52:11 +05301537 device_link_add(dev, smmu->dev,
1538 DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER);
1539
Robin Murphyadfec2e2016-09-12 17:13:55 +01001540 return 0;
Robin Murphyf80cd882016-09-14 15:21:39 +01001541
Vivek Gautamc54451a2017-07-06 15:07:00 +05301542out_cfg_free:
1543 kfree(cfg);
Robin Murphyf80cd882016-09-14 15:21:39 +01001544out_free:
Robin Murphyadfec2e2016-09-12 17:13:55 +01001545 iommu_fwspec_free(dev);
Robin Murphyf80cd882016-09-14 15:21:39 +01001546 return ret;
Will Deacon03edb222015-01-19 14:27:33 +00001547}
1548
Will Deacon45ae7cf2013-06-24 18:31:25 +01001549static void arm_smmu_remove_device(struct device *dev)
1550{
Joerg Roedel9b468f72018-11-29 14:01:00 +01001551 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001552 struct arm_smmu_master_cfg *cfg;
1553 struct arm_smmu_device *smmu;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301554 int ret;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001555
Robin Murphyadfec2e2016-09-12 17:13:55 +01001556 if (!fwspec || fwspec->ops != &arm_smmu_ops)
Robin Murphyf80cd882016-09-14 15:21:39 +01001557 return;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001558
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001559 cfg = fwspec->iommu_priv;
1560 smmu = cfg->smmu;
1561
Sricharan Rd4a44f02018-12-04 11:52:10 +05301562 ret = arm_smmu_rpm_get(smmu);
1563 if (ret < 0)
1564 return;
1565
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001566 iommu_device_unlink(&smmu->iommu, dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001567 arm_smmu_master_free_smes(fwspec);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301568
1569 arm_smmu_rpm_put(smmu);
1570
Antonios Motakis5fc63a72013-10-18 16:08:29 +01001571 iommu_group_remove_device(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001572 kfree(fwspec->iommu_priv);
1573 iommu_fwspec_free(dev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001574}
1575
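/*
 * Masters sharing a stream map entry must share an IOMMU group: reuse any
 * group already associated with the master's SMEs, otherwise fall back to
 * the default per-bus group allocation.
 */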
Joerg Roedelaf659932015-10-21 23:51:41 +02001576static struct iommu_group *arm_smmu_device_group(struct device *dev)
1577{
Joerg Roedel9b468f72018-11-29 14:01:00 +01001578 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001579 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
Robin Murphy588888a2016-09-12 17:13:54 +01001580 struct iommu_group *group = NULL;
1581 int i, idx;
1582
Robin Murphyadfec2e2016-09-12 17:13:55 +01001583 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy588888a2016-09-12 17:13:54 +01001584 if (group && smmu->s2crs[idx].group &&
1585 group != smmu->s2crs[idx].group)
1586 return ERR_PTR(-EINVAL);
1587
1588 group = smmu->s2crs[idx].group;
1589 }
1590
1591 if (group)
Robin Murphye1b44cb2016-11-11 17:59:22 +00001592 return iommu_group_ref_get(group);
Joerg Roedelaf659932015-10-21 23:51:41 +02001593
1594 if (dev_is_pci(dev))
1595 group = pci_device_group(dev);
Nipun Guptaeab03e22018-09-10 19:19:18 +05301596 else if (dev_is_fsl_mc(dev))
1597 group = fsl_mc_device_group(dev);
Joerg Roedelaf659932015-10-21 23:51:41 +02001598 else
1599 group = generic_device_group(dev);
1600
Joerg Roedelaf659932015-10-21 23:51:41 +02001601 return group;
1602}
1603
Will Deaconc752ce42014-06-25 22:46:31 +01001604static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
1605 enum iommu_attr attr, void *data)
1606{
Joerg Roedel1d672632015-03-26 13:43:10 +01001607 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001608
Robin Murphy44f68762018-09-20 17:10:27 +01001609	switch (domain->type) {
1610 case IOMMU_DOMAIN_UNMANAGED:
1611 switch (attr) {
1612 case DOMAIN_ATTR_NESTING:
1613 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
1614 return 0;
1615 default:
1616 return -ENODEV;
1617 }
1618 break;
1619 case IOMMU_DOMAIN_DMA:
1620 switch (attr) {
1621 case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
1622 *(int *)data = smmu_domain->non_strict;
1623 return 0;
1624 default:
1625 return -ENODEV;
1626 }
1627 break;
Will Deaconc752ce42014-06-25 22:46:31 +01001628 default:
Robin Murphy44f68762018-09-20 17:10:27 +01001629 return -EINVAL;
Will Deaconc752ce42014-06-25 22:46:31 +01001630 }
1631}
1632
1633static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
1634 enum iommu_attr attr, void *data)
1635{
Will Deacon518f7132014-11-14 17:17:54 +00001636 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01001637 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001638
Will Deacon518f7132014-11-14 17:17:54 +00001639 mutex_lock(&smmu_domain->init_mutex);
1640
Robin Murphy44f68762018-09-20 17:10:27 +01001641	switch (domain->type) {
1642 case IOMMU_DOMAIN_UNMANAGED:
1643 switch (attr) {
1644 case DOMAIN_ATTR_NESTING:
1645 if (smmu_domain->smmu) {
1646 ret = -EPERM;
1647 goto out_unlock;
1648 }
1649
1650 if (*(int *)data)
1651 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1652 else
1653 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1654 break;
1655 default:
1656 ret = -ENODEV;
Will Deacon518f7132014-11-14 17:17:54 +00001657 }
Robin Murphy44f68762018-09-20 17:10:27 +01001658 break;
1659 case IOMMU_DOMAIN_DMA:
1660 switch (attr) {
1661 case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
1662 smmu_domain->non_strict = *(int *)data;
1663 break;
1664 default:
1665 ret = -ENODEV;
1666 }
Will Deacon518f7132014-11-14 17:17:54 +00001667 break;
Will Deaconc752ce42014-06-25 22:46:31 +01001668 default:
Robin Murphy44f68762018-09-20 17:10:27 +01001669 ret = -EINVAL;
Will Deaconc752ce42014-06-25 22:46:31 +01001670 }
Will Deacon518f7132014-11-14 17:17:54 +00001671out_unlock:
1672 mutex_unlock(&smmu_domain->init_mutex);
1673 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01001674}
1675
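/*
 * Pack the firmware-supplied stream ID, plus any mask from a second cell or
 * the "stream-match-mask" property, into a single fwspec ID using the SMR
 * register field layout.
 */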
Robin Murphy021bb842016-09-14 15:26:46 +01001676static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
1677{
Robin Murphy56fbf602017-03-31 12:03:33 +01001678 u32 mask, fwid = 0;
Robin Murphy021bb842016-09-14 15:26:46 +01001679
1680 if (args->args_count > 0)
Robin Murphy0caf5f42019-08-15 19:37:23 +01001681 fwid |= FIELD_PREP(SMR_ID, args->args[0]);
Robin Murphy021bb842016-09-14 15:26:46 +01001682
1683 if (args->args_count > 1)
Robin Murphy0caf5f42019-08-15 19:37:23 +01001684 fwid |= FIELD_PREP(SMR_MASK, args->args[1]);
Robin Murphy56fbf602017-03-31 12:03:33 +01001685 else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
Robin Murphy0caf5f42019-08-15 19:37:23 +01001686 fwid |= FIELD_PREP(SMR_MASK, mask);
Robin Murphy021bb842016-09-14 15:26:46 +01001687
1688 return iommu_fwspec_add_ids(dev, &fwid, 1);
1689}
1690
Eric Augerf3ebee82017-01-19 20:57:55 +00001691static void arm_smmu_get_resv_regions(struct device *dev,
1692 struct list_head *head)
1693{
1694 struct iommu_resv_region *region;
1695 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
1696
1697 region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
Robin Murphy9d3a4de2017-03-16 17:00:16 +00001698 prot, IOMMU_RESV_SW_MSI);
Eric Augerf3ebee82017-01-19 20:57:55 +00001699 if (!region)
1700 return;
1701
1702 list_add_tail(&region->list, head);
Robin Murphy273df962017-03-16 17:00:19 +00001703
1704 iommu_dma_get_resv_regions(dev, head);
Eric Augerf3ebee82017-01-19 20:57:55 +00001705}
1706
1707static void arm_smmu_put_resv_regions(struct device *dev,
1708 struct list_head *head)
1709{
1710 struct iommu_resv_region *entry, *next;
1711
1712 list_for_each_entry_safe(entry, next, head, list)
1713 kfree(entry);
1714}
1715
Will Deacon518f7132014-11-14 17:17:54 +00001716static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01001717 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01001718 .domain_alloc = arm_smmu_domain_alloc,
1719 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01001720 .attach_dev = arm_smmu_attach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01001721 .map = arm_smmu_map,
1722 .unmap = arm_smmu_unmap,
Robin Murphy44f68762018-09-20 17:10:27 +01001723 .flush_iotlb_all = arm_smmu_flush_iotlb_all,
Robin Murphy32b12442017-09-28 15:55:01 +01001724 .iotlb_sync = arm_smmu_iotlb_sync,
Will Deaconc752ce42014-06-25 22:46:31 +01001725 .iova_to_phys = arm_smmu_iova_to_phys,
1726 .add_device = arm_smmu_add_device,
1727 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02001728 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01001729 .domain_get_attr = arm_smmu_domain_get_attr,
1730 .domain_set_attr = arm_smmu_domain_set_attr,
Robin Murphy021bb842016-09-14 15:26:46 +01001731 .of_xlate = arm_smmu_of_xlate,
Eric Augerf3ebee82017-01-19 20:57:55 +00001732 .get_resv_regions = arm_smmu_get_resv_regions,
1733 .put_resv_regions = arm_smmu_put_resv_regions,
Will Deacon518f7132014-11-14 17:17:54 +00001734 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001735};
1736
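/*
 * Bring the SMMU to a known state: clear the global fault status, reset every
 * stream mapping entry and context bank, apply the MMU-500 ACR/ACTLR errata
 * workarounds, invalidate the TLBs, and finally configure and enable the
 * device via sCR0.
 */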
1737static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
1738{
1739 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001740 int i;
Peng Fan3ca37122016-05-03 21:50:30 +08001741 u32 reg, major;
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001742
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001743 /* clear global FSR */
1744 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
1745 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001746
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001747 /*
1748 * Reset stream mapping groups: Initial values mark all SMRn as
1749 * invalid and all S2CRn as bypass unless overridden.
1750 */
Robin Murphy8e8b2032016-09-12 17:13:50 +01001751 for (i = 0; i < smmu->num_mapping_groups; ++i)
1752 arm_smmu_write_sme(smmu, i);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001753
Nipun Gupta6eb18d42016-11-04 15:25:23 +05301754 if (smmu->model == ARM_MMU500) {
1755 /*
1756 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
1757 * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
1758 * bit is only present in MMU-500r2 onwards.
1759 */
1760 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
Robin Murphy0caf5f42019-08-15 19:37:23 +01001761 major = FIELD_GET(ID7_MAJOR, reg);
Peng Fan3ca37122016-05-03 21:50:30 +08001762 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
Nipun Gupta6eb18d42016-11-04 15:25:23 +05301763 if (major >= 2)
1764 reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
1765 /*
1766 * Allow unmatched Stream IDs to allocate bypass
1767 * TLB entries for reduced latency.
1768 */
Feng Kan74f55d32017-10-11 15:08:39 -07001769 reg |= ARM_MMU500_ACR_SMTNMB_TLBEN | ARM_MMU500_ACR_S2CRB_TLBEN;
Peng Fan3ca37122016-05-03 21:50:30 +08001770 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
1771 }
1772
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001773 /* Make sure all context banks are disabled and clear CB_FSR */
1774 for (i = 0; i < smmu->num_context_banks; ++i) {
Robin Murphy90df3732017-08-08 14:56:14 +01001775 arm_smmu_write_context_bank(smmu, i);
Robin Murphy19713fd2019-08-15 19:37:30 +01001776 arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_FSR, FSR_FAULT);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01001777 /*
1778 * Disable MMU-500's not-particularly-beneficial next-page
1779 * prefetcher for the sake of errata #841119 and #826419.
1780 */
1781 if (smmu->model == ARM_MMU500) {
Robin Murphy19713fd2019-08-15 19:37:30 +01001782 reg = arm_smmu_cb_read(smmu, i, ARM_SMMU_CB_ACTLR);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01001783 reg &= ~ARM_MMU500_ACTLR_CPRE;
Robin Murphy19713fd2019-08-15 19:37:30 +01001784 arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_ACTLR, reg);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01001785 }
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001786 }
Will Deacon1463fe42013-07-31 19:21:27 +01001787
Will Deacon45ae7cf2013-06-24 18:31:25 +01001788 /* Invalidate the TLB, just in case */
Robin Murphy4e4abae2019-06-03 14:15:37 +02001789 writel_relaxed(QCOM_DUMMY_VAL, gr0_base + ARM_SMMU_GR0_TLBIALLH);
1790 writel_relaxed(QCOM_DUMMY_VAL, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001791
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001792 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001793
Will Deacon45ae7cf2013-06-24 18:31:25 +01001794 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001795 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001796
1797 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001798 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001799
Robin Murphy25a1c962016-02-10 14:25:33 +00001800 /* Enable client access, handling unmatched streams as appropriate */
1801 reg &= ~sCR0_CLIENTPD;
1802 if (disable_bypass)
1803 reg |= sCR0_USFCFG;
1804 else
1805 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001806
1807 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001808 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001809
1810 /* Don't upgrade barriers */
Robin Murphy0caf5f42019-08-15 19:37:23 +01001811 reg &= ~(sCR0_BSU);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001812
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001813 if (smmu->features & ARM_SMMU_FEAT_VMID16)
1814 reg |= sCR0_VMID16EN;
1815
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001816 if (smmu->features & ARM_SMMU_FEAT_EXIDS)
1817 reg |= sCR0_EXIDENABLE;
1818
Will Deacon45ae7cf2013-06-24 18:31:25 +01001819 /* Push the button */
Robin Murphy11febfc2017-03-30 17:56:31 +01001820 arm_smmu_tlb_sync_global(smmu);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001821 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001822}
1823
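/* Decode the ID register address-size field encoding into a width in bits */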
1824static int arm_smmu_id_size_to_bits(int size)
1825{
1826 switch (size) {
1827 case 0:
1828 return 32;
1829 case 1:
1830 return 36;
1831 case 2:
1832 return 40;
1833 case 3:
1834 return 42;
1835 case 4:
1836 return 44;
1837 case 5:
1838 default:
1839 return 48;
1840 }
1841}
1842
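/*
 * Probe the ID registers to discover the supported translation stages, stream
 * matching resources, context bank counts, address sizes and page-table
 * formats, allocating the SMR/S2CR/context-bank bookkeeping accordingly.
 */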
1843static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1844{
Robin Murphy490325e2019-08-15 19:37:26 +01001845 unsigned int size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001846 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1847 u32 id;
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001848 bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001849 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001850
1851 dev_notice(smmu->dev, "probing hardware configuration...\n");
Robin Murphyb7862e32016-04-13 18:13:03 +01001852 dev_notice(smmu->dev, "SMMUv%d with:\n",
1853 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001854
1855 /* ID0 */
1856 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01001857
1858 /* Restrict available stages based on module parameter */
1859 if (force_stage == 1)
1860 id &= ~(ID0_S2TS | ID0_NTS);
1861 else if (force_stage == 2)
1862 id &= ~(ID0_S1TS | ID0_NTS);
1863
Will Deacon45ae7cf2013-06-24 18:31:25 +01001864 if (id & ID0_S1TS) {
1865 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
1866 dev_notice(smmu->dev, "\tstage 1 translation\n");
1867 }
1868
1869 if (id & ID0_S2TS) {
1870 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
1871 dev_notice(smmu->dev, "\tstage 2 translation\n");
1872 }
1873
1874 if (id & ID0_NTS) {
1875 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
1876 dev_notice(smmu->dev, "\tnested translation\n");
1877 }
1878
1879 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01001880 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001881 dev_err(smmu->dev, "\tno translation support!\n");
1882 return -ENODEV;
1883 }
1884
Robin Murphyb7862e32016-04-13 18:13:03 +01001885 if ((id & ID0_S1TS) &&
1886 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001887 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
1888 dev_notice(smmu->dev, "\taddress translation ops\n");
1889 }
1890
Robin Murphybae2c2d2015-07-29 19:46:05 +01001891 /*
1892 * In order for DMA API calls to work properly, we must defer to what
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001893 * the FW says about coherency, regardless of what the hardware claims.
Robin Murphybae2c2d2015-07-29 19:46:05 +01001894 * Fortunately, this also opens up a workaround for systems where the
1895 * ID register value has ended up configured incorrectly.
1896 */
Robin Murphybae2c2d2015-07-29 19:46:05 +01001897 cttw_reg = !!(id & ID0_CTTW);
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001898 if (cttw_fw || cttw_reg)
Robin Murphybae2c2d2015-07-29 19:46:05 +01001899 dev_notice(smmu->dev, "\t%scoherent table walk\n",
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001900 cttw_fw ? "" : "non-");
1901 if (cttw_fw != cttw_reg)
Robin Murphybae2c2d2015-07-29 19:46:05 +01001902 dev_notice(smmu->dev,
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001903 "\t(IDR0.CTTW overridden by FW configuration)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01001904
Robin Murphy21174242016-09-12 17:13:48 +01001905 /* Max. number of entries we have for stream matching/indexing */
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001906 if (smmu->version == ARM_SMMU_V2 && id & ID0_EXIDS) {
1907 smmu->features |= ARM_SMMU_FEAT_EXIDS;
1908 size = 1 << 16;
1909 } else {
Robin Murphy0caf5f42019-08-15 19:37:23 +01001910 size = 1 << FIELD_GET(ID0_NUMSIDB, id);
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001911 }
Robin Murphy21174242016-09-12 17:13:48 +01001912 smmu->streamid_mask = size - 1;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001913 if (id & ID0_SMS) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001914 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
Robin Murphy0caf5f42019-08-15 19:37:23 +01001915 size = FIELD_GET(ID0_NUMSMRG, id);
Robin Murphy21174242016-09-12 17:13:48 +01001916 if (size == 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001917 dev_err(smmu->dev,
1918 "stream-matching supported, but no SMRs present!\n");
1919 return -ENODEV;
1920 }
1921
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001922 /* Zero-initialised to mark as invalid */
1923 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
1924 GFP_KERNEL);
1925 if (!smmu->smrs)
1926 return -ENOMEM;
1927
Will Deacon45ae7cf2013-06-24 18:31:25 +01001928 dev_notice(smmu->dev,
Robin Murphy490325e2019-08-15 19:37:26 +01001929			   "\tstream matching with %u register groups\n", size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001930 }
Robin Murphy8e8b2032016-09-12 17:13:50 +01001931 /* s2cr->type == 0 means translation, so initialise explicitly */
1932 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
1933 GFP_KERNEL);
1934 if (!smmu->s2crs)
1935 return -ENOMEM;
1936 for (i = 0; i < size; i++)
1937 smmu->s2crs[i] = s2cr_init_val;
1938
Robin Murphy21174242016-09-12 17:13:48 +01001939 smmu->num_mapping_groups = size;
Robin Murphy588888a2016-09-12 17:13:54 +01001940 mutex_init(&smmu->stream_map_mutex);
Will Deacon8e517e72017-07-06 15:55:48 +01001941 spin_lock_init(&smmu->global_sync_lock);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001942
Robin Murphy7602b872016-04-28 17:12:09 +01001943 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
1944 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
1945 if (!(id & ID0_PTFS_NO_AARCH32S))
1946 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
1947 }
1948
Will Deacon45ae7cf2013-06-24 18:31:25 +01001949 /* ID1 */
1950 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01001951 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001952
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01001953 /* Check for size mismatch of SMMU address space from mapped region */
Robin Murphy0caf5f42019-08-15 19:37:23 +01001954 size = 1 << (FIELD_GET(ID1_NUMPAGENDXB, id) + 1);
Robin Murphy490325e2019-08-15 19:37:26 +01001955 if (smmu->numpage != 2 * size << smmu->pgshift)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001956 dev_warn(smmu->dev,
Robin Murphy490325e2019-08-15 19:37:26 +01001957 "SMMU address space size (0x%x) differs from mapped region size (0x%x)!\n",
1958 2 * size << smmu->pgshift, smmu->numpage);
1959 /* Now properly encode NUMPAGE to subsequently derive SMMU_CB_BASE */
1960 smmu->numpage = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001961
Robin Murphy0caf5f42019-08-15 19:37:23 +01001962 smmu->num_s2_context_banks = FIELD_GET(ID1_NUMS2CB, id);
1963 smmu->num_context_banks = FIELD_GET(ID1_NUMCB, id);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001964 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
1965 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
1966 return -ENODEV;
1967 }
1968 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
1969 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphye086d912016-04-13 18:12:58 +01001970 /*
1971 * Cavium CN88xx erratum #27704.
1972 * Ensure ASID and VMID allocation is unique across all SMMUs in
1973 * the system.
1974 */
1975 if (smmu->model == CAVIUM_SMMUV2) {
1976 smmu->cavium_id_base =
1977 atomic_add_return(smmu->num_context_banks,
1978 &cavium_smmu_context_count);
1979 smmu->cavium_id_base -= smmu->num_context_banks;
Robert Richter53c35dce2017-03-13 11:39:01 +01001980 dev_notice(smmu->dev, "\tenabling workaround for Cavium erratum 27704\n");
Robin Murphye086d912016-04-13 18:12:58 +01001981 }
Robin Murphy90df3732017-08-08 14:56:14 +01001982 smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks,
1983 sizeof(*smmu->cbs), GFP_KERNEL);
1984 if (!smmu->cbs)
1985 return -ENOMEM;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001986
1987 /* ID2 */
1988 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
Robin Murphy0caf5f42019-08-15 19:37:23 +01001989 size = arm_smmu_id_size_to_bits(FIELD_GET(ID2_IAS, id));
Will Deacon518f7132014-11-14 17:17:54 +00001990 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001991
Will Deacon518f7132014-11-14 17:17:54 +00001992 /* The output mask is also applied for bypass */
Robin Murphy0caf5f42019-08-15 19:37:23 +01001993 size = arm_smmu_id_size_to_bits(FIELD_GET(ID2_OAS, id));
Will Deacon518f7132014-11-14 17:17:54 +00001994 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001995
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001996 if (id & ID2_VMID16)
1997 smmu->features |= ARM_SMMU_FEAT_VMID16;
1998
Robin Murphyf1d84542015-03-04 16:41:05 +00001999 /*
2000 * What the page table walker can address actually depends on which
2001 * descriptor format is in use, but since a) we don't know that yet,
2002 * and b) it can vary per context bank, this will have to do...
2003 */
2004 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
2005 dev_warn(smmu->dev,
2006 "failed to set DMA mask for table walker\n");
2007
Robin Murphyb7862e32016-04-13 18:13:03 +01002008 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00002009 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01002010 if (smmu->version == ARM_SMMU_V1_64K)
2011 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002012 } else {
Robin Murphy0caf5f42019-08-15 19:37:23 +01002013 size = FIELD_GET(ID2_UBS, id);
Will Deacon518f7132014-11-14 17:17:54 +00002014 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00002015 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01002016 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00002017 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01002018 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00002019 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01002020 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002021 }
2022
Robin Murphy7602b872016-04-28 17:12:09 +01002023 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01002024 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01002025 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01002026 if (smmu->features &
2027 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01002028 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01002029 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01002030 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01002031 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01002032 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01002033
Robin Murphyd5466352016-05-09 17:20:09 +01002034 if (arm_smmu_ops.pgsize_bitmap == -1UL)
2035 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
2036 else
2037 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
2038 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
2039 smmu->pgsize_bitmap);
2040
Will Deacon28d60072014-09-01 16:24:48 +01002042 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
2043 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
Will Deacon518f7132014-11-14 17:17:54 +00002044 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01002045
2046 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
2047 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
Will Deacon518f7132014-11-14 17:17:54 +00002048 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01002049
Will Deacon45ae7cf2013-06-24 18:31:25 +01002050 return 0;
2051}
2052
Robin Murphy67b65a32016-04-13 18:12:57 +01002053struct arm_smmu_match_data {
2054 enum arm_smmu_arch_version version;
2055 enum arm_smmu_implementation model;
2056};
2057
2058#define ARM_SMMU_MATCH_DATA(name, ver, imp) \
Sricharan R96a299d2018-12-04 11:52:09 +05302059static const struct arm_smmu_match_data name = { .version = ver, .model = imp }
Robin Murphy67b65a32016-04-13 18:12:57 +01002060
2061ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
2062ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
Robin Murphyb7862e32016-04-13 18:13:03 +01002063ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01002064ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
Robin Murphye086d912016-04-13 18:12:58 +01002065ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
Vivek Gautam89cddc52018-12-04 11:52:13 +05302066ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2);
Robin Murphy67b65a32016-04-13 18:12:57 +01002067
Joerg Roedel09b52692014-10-02 12:24:45 +02002068static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01002069 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
2070 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
2071 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01002072 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01002073 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01002074 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Vivek Gautam89cddc52018-12-04 11:52:13 +05302075 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
Robin Murphy09360402014-08-28 17:51:59 +01002076 { },
2077};
Robin Murphy09360402014-08-28 17:51:59 +01002078
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002079#ifdef CONFIG_ACPI
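/* Map the IORT SMMU model code onto the driver's version/implementation enums */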
2080static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
2081{
2082 int ret = 0;
2083
2084 switch (model) {
2085 case ACPI_IORT_SMMU_V1:
2086 case ACPI_IORT_SMMU_CORELINK_MMU400:
2087 smmu->version = ARM_SMMU_V1;
2088 smmu->model = GENERIC_SMMU;
2089 break;
Robin Murphy84c24372017-06-19 16:41:56 +01002090 case ACPI_IORT_SMMU_CORELINK_MMU401:
2091 smmu->version = ARM_SMMU_V1_64K;
2092 smmu->model = GENERIC_SMMU;
2093 break;
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002094 case ACPI_IORT_SMMU_V2:
2095 smmu->version = ARM_SMMU_V2;
2096 smmu->model = GENERIC_SMMU;
2097 break;
2098 case ACPI_IORT_SMMU_CORELINK_MMU500:
2099 smmu->version = ARM_SMMU_V2;
2100 smmu->model = ARM_MMU500;
2101 break;
Robin Murphy84c24372017-06-19 16:41:56 +01002102 case ACPI_IORT_SMMU_CAVIUM_THUNDERX:
2103 smmu->version = ARM_SMMU_V2;
2104 smmu->model = CAVIUM_SMMUV2;
2105 break;
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002106 default:
2107 ret = -ENODEV;
2108 }
2109
2110 return ret;
2111}
2112
2113static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
2114 struct arm_smmu_device *smmu)
2115{
2116 struct device *dev = smmu->dev;
2117 struct acpi_iort_node *node =
2118 *(struct acpi_iort_node **)dev_get_platdata(dev);
2119 struct acpi_iort_smmu *iort_smmu;
2120 int ret;
2121
2122 /* Retrieve SMMU1/2 specific data */
2123 iort_smmu = (struct acpi_iort_smmu *)node->node_data;
2124
2125 ret = acpi_smmu_get_data(iort_smmu->model, smmu);
2126 if (ret < 0)
2127 return ret;
2128
2129 /* Ignore the configuration access interrupt */
2130 smmu->num_global_irqs = 1;
2131
2132 if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
2133 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
2134
2135 return 0;
2136}
2137#else
2138static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
2139 struct arm_smmu_device *smmu)
2140{
2141 return -ENODEV;
2142}
2143#endif
2144
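/*
 * Parse the DT node: the mandatory #global-interrupts property, per-compatible
 * match data, driver options, whether the deprecated "mmu-masters" binding is
 * in use, and whether table walks are coherent.
 */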
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002145static int arm_smmu_device_dt_probe(struct platform_device *pdev,
2146 struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002147{
Robin Murphy67b65a32016-04-13 18:12:57 +01002148 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002149 struct device *dev = &pdev->dev;
Robin Murphy021bb842016-09-14 15:26:46 +01002150 bool legacy_binding;
2151
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002152 if (of_property_read_u32(dev->of_node, "#global-interrupts",
2153 &smmu->num_global_irqs)) {
2154 dev_err(dev, "missing #global-interrupts property\n");
2155 return -ENODEV;
2156 }
2157
2158 data = of_device_get_match_data(dev);
2159 smmu->version = data->version;
2160 smmu->model = data->model;
2161
2162 parse_driver_options(smmu);
2163
Robin Murphy021bb842016-09-14 15:26:46 +01002164 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
2165 if (legacy_binding && !using_generic_binding) {
2166 if (!using_legacy_binding)
2167 pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
2168 using_legacy_binding = true;
2169 } else if (!legacy_binding && !using_legacy_binding) {
2170 using_generic_binding = true;
2171 } else {
2172 dev_err(dev, "not probing due to mismatched DT properties\n");
2173 return -ENODEV;
2174 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002175
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002176 if (of_dma_is_coherent(dev->of_node))
2177 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
2178
2179 return 0;
2180}
2181
Robin Murphyf6810c12017-04-10 16:51:05 +05302182static void arm_smmu_bus_init(void)
2183{
2184 /* Oh, for a proper bus abstraction */
2185 if (!iommu_present(&platform_bus_type))
2186 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
2187#ifdef CONFIG_ARM_AMBA
2188 if (!iommu_present(&amba_bustype))
2189 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
2190#endif
2191#ifdef CONFIG_PCI
2192 if (!iommu_present(&pci_bus_type)) {
2193 pci_request_acs();
2194 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
2195 }
2196#endif
Nipun Guptaeab03e22018-09-10 19:19:18 +05302197#ifdef CONFIG_FSL_MC_BUS
2198 if (!iommu_present(&fsl_mc_bus_type))
2199 bus_set_iommu(&fsl_mc_bus_type, &arm_smmu_ops);
2200#endif
Robin Murphyf6810c12017-04-10 16:51:05 +05302201}
2202
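/*
 * Common probe path for DT and ACPI: map the registers, gather the global and
 * context interrupts and the clocks, probe the hardware configuration,
 * register with the IOMMU core, reset the device and, for the generic
 * bindings, install the bus ops.
 */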
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002203static int arm_smmu_device_probe(struct platform_device *pdev)
2204{
2205 struct resource *res;
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002206 resource_size_t ioaddr;
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002207 struct arm_smmu_device *smmu;
2208 struct device *dev = &pdev->dev;
2209 int num_irqs, i, err;
2210
Will Deacon45ae7cf2013-06-24 18:31:25 +01002211 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
2212 if (!smmu) {
2213 dev_err(dev, "failed to allocate arm_smmu_device\n");
2214 return -ENOMEM;
2215 }
2216 smmu->dev = dev;
2217
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002218 if (dev->of_node)
2219 err = arm_smmu_device_dt_probe(pdev, smmu);
2220 else
2221 err = arm_smmu_device_acpi_probe(pdev, smmu);
2222
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002223 if (err)
2224 return err;
Robin Murphy09360402014-08-28 17:51:59 +01002225
Will Deacon45ae7cf2013-06-24 18:31:25 +01002226 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002227 ioaddr = res->start;
Julia Lawall8a7f4312013-08-19 12:20:37 +01002228 smmu->base = devm_ioremap_resource(dev, res);
2229 if (IS_ERR(smmu->base))
2230 return PTR_ERR(smmu->base);
Robin Murphy490325e2019-08-15 19:37:26 +01002231 /*
2232 * The resource size should effectively match the value of SMMU_TOP;
2233 * stash that temporarily until we know PAGESIZE to validate it with.
2234 */
2235 smmu->numpage = resource_size(res);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002236
Will Deacon45ae7cf2013-06-24 18:31:25 +01002237 num_irqs = 0;
2238 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
2239 num_irqs++;
2240 if (num_irqs > smmu->num_global_irqs)
2241 smmu->num_context_irqs++;
2242 }
2243
Andreas Herrmann44a08de2013-10-01 13:39:07 +01002244 if (!smmu->num_context_irqs) {
2245 dev_err(dev, "found %d interrupts but expected at least %d\n",
2246 num_irqs, smmu->num_global_irqs + 1);
2247 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002248 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002249
Kees Cooka86854d2018-06-12 14:07:58 -07002250 smmu->irqs = devm_kcalloc(dev, num_irqs, sizeof(*smmu->irqs),
Will Deacon45ae7cf2013-06-24 18:31:25 +01002251 GFP_KERNEL);
2252 if (!smmu->irqs) {
2253 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
2254 return -ENOMEM;
2255 }
2256
2257 for (i = 0; i < num_irqs; ++i) {
2258 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07002259
Will Deacon45ae7cf2013-06-24 18:31:25 +01002260 if (irq < 0) {
2261 dev_err(dev, "failed to get irq index %d\n", i);
2262 return -ENODEV;
2263 }
2264 smmu->irqs[i] = irq;
2265 }
2266
Sricharan R96a299d2018-12-04 11:52:09 +05302267 err = devm_clk_bulk_get_all(dev, &smmu->clks);
2268 if (err < 0) {
2269 dev_err(dev, "failed to get clocks %d\n", err);
2270 return err;
2271 }
2272 smmu->num_clks = err;
2273
2274 err = clk_bulk_prepare_enable(smmu->num_clks, smmu->clks);
2275 if (err)
2276 return err;
2277
Olav Haugan3c8766d2014-08-22 17:12:32 -07002278 err = arm_smmu_device_cfg_probe(smmu);
2279 if (err)
2280 return err;
2281
Vivek Gautamd1e20222018-07-19 23:23:56 +05302282 if (smmu->version == ARM_SMMU_V2) {
2283 if (smmu->num_context_banks > smmu->num_context_irqs) {
2284 dev_err(dev,
2285 "found only %d context irq(s) but %d required\n",
2286 smmu->num_context_irqs, smmu->num_context_banks);
2287 return -ENODEV;
2288 }
2289
2290 /* Ignore superfluous interrupts */
2291 smmu->num_context_irqs = smmu->num_context_banks;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002292 }
2293
Will Deacon45ae7cf2013-06-24 18:31:25 +01002294 for (i = 0; i < smmu->num_global_irqs; ++i) {
Peng Fanbee14002016-07-04 17:38:22 +08002295 err = devm_request_irq(smmu->dev, smmu->irqs[i],
2296 arm_smmu_global_fault,
2297 IRQF_SHARED,
2298 "arm-smmu global fault",
2299 smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002300 if (err) {
2301 dev_err(dev, "failed to request global IRQ %d (%u)\n",
2302 i, smmu->irqs[i]);
Robin Murphyf80cd882016-09-14 15:21:39 +01002303 return err;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002304 }
2305 }
2306
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002307 err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
2308 "smmu.%pa", &ioaddr);
2309 if (err) {
2310 dev_err(dev, "Failed to register iommu in sysfs\n");
2311 return err;
2312 }
2313
2314 iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
2315 iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
2316
2317 err = iommu_device_register(&smmu->iommu);
2318 if (err) {
2319 dev_err(dev, "Failed to register iommu\n");
2320 return err;
2321 }
2322
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002323 platform_set_drvdata(pdev, smmu);
Will Deaconfd90cec2013-08-21 13:56:34 +01002324 arm_smmu_device_reset(smmu);
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03002325 arm_smmu_test_smr_masks(smmu);
Robin Murphy021bb842016-09-14 15:26:46 +01002326
Robin Murphyf6810c12017-04-10 16:51:05 +05302327 /*
Sricharan Rd4a44f02018-12-04 11:52:10 +05302328 * We want to avoid touching dev->power.lock in fastpaths unless
2329 * it's really going to do something useful - pm_runtime_enabled()
2330 * can serve as an ideal proxy for that decision. So, conditionally
2331 * enable pm_runtime.
2332 */
2333 if (dev->pm_domain) {
2334 pm_runtime_set_active(dev);
2335 pm_runtime_enable(dev);
2336 }
2337
2338 /*
Robin Murphyf6810c12017-04-10 16:51:05 +05302339 * For ACPI and generic DT bindings, an SMMU will be probed before
2340 * any device which might need it, so we want the bus ops in place
2341 * ready to handle default domain setup as soon as any SMMU exists.
2342 */
2343 if (!using_legacy_binding)
2344 arm_smmu_bus_init();
2345
Will Deacon45ae7cf2013-06-24 18:31:25 +01002346 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002347}
2348
Robin Murphyf6810c12017-04-10 16:51:05 +05302349/*
2350 * With the legacy DT binding in play, though, we have no guarantees about
2351 * probe order, but then we're also not doing default domains, so we can
2352 * delay setting bus ops until we're sure every possible SMMU is ready,
2353 * and that way ensure that no add_device() calls get missed.
2354 */
2355static int arm_smmu_legacy_bus_init(void)
2356{
2357 if (using_legacy_binding)
2358 arm_smmu_bus_init();
2359 return 0;
2360}
2361device_initcall_sync(arm_smmu_legacy_bus_init);
2362
Paul Gortmakeraddb672f2018-12-01 14:19:16 -05002363static void arm_smmu_device_shutdown(struct platform_device *pdev)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002364{
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002365 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002366
2367 if (!smmu)
Paul Gortmakeraddb672f2018-12-01 14:19:16 -05002368 return;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002369
Will Deaconecfadb62013-07-31 19:21:28 +01002370 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002371 dev_err(&pdev->dev, "removing device with active domains!\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01002372
Sricharan Rd4a44f02018-12-04 11:52:10 +05302373 arm_smmu_rpm_get(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002374 /* Turn the thing off */
Mitchel Humpherys29073202014-07-08 09:52:18 -07002375 writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Sricharan Rd4a44f02018-12-04 11:52:10 +05302376 arm_smmu_rpm_put(smmu);
Sricharan R96a299d2018-12-04 11:52:09 +05302377
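	/* Disable the clocks through runtime PM if it is enabled, otherwise directly */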
Sricharan Rd4a44f02018-12-04 11:52:10 +05302378 if (pm_runtime_enabled(smmu->dev))
2379 pm_runtime_force_suspend(smmu->dev);
2380 else
2381 clk_bulk_disable(smmu->num_clks, smmu->clks);
2382
2383 clk_bulk_unprepare(smmu->num_clks, smmu->clks);
Nate Watterson7aa86192017-06-29 18:18:15 -04002384}
2385
Sricharan R96a299d2018-12-04 11:52:09 +05302386static int __maybe_unused arm_smmu_runtime_resume(struct device *dev)
Robin Murphya2d866f2017-08-08 14:56:15 +01002387{
2388 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
Sricharan R96a299d2018-12-04 11:52:09 +05302389 int ret;
2390
2391 ret = clk_bulk_enable(smmu->num_clks, smmu->clks);
2392 if (ret)
2393 return ret;
Robin Murphya2d866f2017-08-08 14:56:15 +01002394
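	/* Register state may have been lost while the clocks were off, so reprogram it */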
2395 arm_smmu_device_reset(smmu);
Sricharan R96a299d2018-12-04 11:52:09 +05302396
Will Deacon45ae7cf2013-06-24 18:31:25 +01002397 return 0;
2398}
2399
Sricharan R96a299d2018-12-04 11:52:09 +05302400static int __maybe_unused arm_smmu_runtime_suspend(struct device *dev)
Dan Carpenter6614ee72013-08-21 09:34:20 +01002401{
Sricharan R96a299d2018-12-04 11:52:09 +05302402 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
2403
2404 clk_bulk_disable(smmu->num_clks, smmu->clks);
2405
2406 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002407}
2408
Robin Murphya2d866f2017-08-08 14:56:15 +01002409static int __maybe_unused arm_smmu_pm_resume(struct device *dev)
2410{
Sricharan R96a299d2018-12-04 11:52:09 +05302411 if (pm_runtime_suspended(dev))
2412 return 0;
Robin Murphya2d866f2017-08-08 14:56:15 +01002413
Sricharan R96a299d2018-12-04 11:52:09 +05302414 return arm_smmu_runtime_resume(dev);
Robin Murphya2d866f2017-08-08 14:56:15 +01002415}
2416
Sricharan R96a299d2018-12-04 11:52:09 +05302417static int __maybe_unused arm_smmu_pm_suspend(struct device *dev)
2418{
2419 if (pm_runtime_suspended(dev))
2420 return 0;
2421
2422 return arm_smmu_runtime_suspend(dev);
2423}
2424
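/* System sleep reuses the runtime PM hooks; a runtime-suspended SMMU is left as-is */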
2425static const struct dev_pm_ops arm_smmu_pm_ops = {
2426 SET_SYSTEM_SLEEP_PM_OPS(arm_smmu_pm_suspend, arm_smmu_pm_resume)
2427 SET_RUNTIME_PM_OPS(arm_smmu_runtime_suspend,
2428 arm_smmu_runtime_resume, NULL)
2429};
Robin Murphya2d866f2017-08-08 14:56:15 +01002430
Will Deacon45ae7cf2013-06-24 18:31:25 +01002431static struct platform_driver arm_smmu_driver = {
2432 .driver = {
Paul Gortmakeraddb672f2018-12-01 14:19:16 -05002433 .name = "arm-smmu",
2434 .of_match_table = of_match_ptr(arm_smmu_of_match),
2435 .pm = &arm_smmu_pm_ops,
2436 .suppress_bind_attrs = true,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002437 },
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002438 .probe = arm_smmu_device_probe,
Nate Watterson7aa86192017-06-29 18:18:15 -04002439 .shutdown = arm_smmu_device_shutdown,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002440};
Paul Gortmakeraddb672f2018-12-01 14:19:16 -05002441builtin_platform_driver(arm_smmu_driver);