blob: 243bc4cb2705b331d71329c61d753f303499c998 [file] [log] [blame]
Thomas Gleixner45051532019-05-29 16:57:47 -07001// SPDX-License-Identifier: GPL-2.0-only
Will Deacon45ae7cf2013-06-24 18:31:25 +01002/*
3 * IOMMU API for ARM architected SMMU implementations.
4 *
Will Deacon45ae7cf2013-06-24 18:31:25 +01005 * Copyright (C) 2013 ARM Limited
6 *
7 * Author: Will Deacon <will.deacon@arm.com>
8 *
9 * This driver currently supports:
10 * - SMMUv1 and v2 implementations
11 * - Stream-matching and stream-indexing
12 * - v7/v8 long-descriptor format
13 * - Non-secure access to the SMMU
Will Deacon45ae7cf2013-06-24 18:31:25 +010014 * - Context fault reporting
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +030015 * - Extended Stream ID (16 bit)
Will Deacon45ae7cf2013-06-24 18:31:25 +010016 */
17
18#define pr_fmt(fmt) "arm-smmu: " fmt
19
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +000020#include <linux/acpi.h>
21#include <linux/acpi_iort.h>
Robin Murphy0caf5f42019-08-15 19:37:23 +010022#include <linux/bitfield.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010023#include <linux/delay.h>
Robin Murphy9adb9592016-01-26 18:06:36 +000024#include <linux/dma-iommu.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010025#include <linux/dma-mapping.h>
26#include <linux/err.h>
27#include <linux/interrupt.h>
28#include <linux/io.h>
Mitchel Humpherys859a7322014-10-29 21:13:40 +000029#include <linux/iopoll.h>
Will Deaconb06c0762019-12-19 12:03:45 +000030#include <linux/module.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010031#include <linux/of.h>
Robin Murphybae2c2d2015-07-29 19:46:05 +010032#include <linux/of_address.h>
Robin Murphyd6fc5d92016-09-12 17:13:52 +010033#include <linux/of_device.h>
Robin Murphyadfec2e2016-09-12 17:13:55 +010034#include <linux/of_iommu.h>
Will Deacona9a1b0b2014-05-01 18:05:08 +010035#include <linux/pci.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010036#include <linux/platform_device.h>
Sricharan R96a299d2018-12-04 11:52:09 +053037#include <linux/pm_runtime.h>
Robin Murphy931a0ba2019-09-17 15:45:34 +010038#include <linux/ratelimit.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010039#include <linux/slab.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010040
41#include <linux/amba/bus.h>
Nipun Guptaeab03e22018-09-10 19:19:18 +053042#include <linux/fsl/mc.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010043
Robin Murphyc5fc6482019-08-15 19:37:32 +010044#include "arm-smmu.h"
Rob Clark2b037742017-08-09 10:43:03 -040045
Robin Murphy4e4abae2019-06-03 14:15:37 +020046/*
47 * Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU
48 * global register space are still, in fact, using a hypervisor to mediate it
49 * by trapping and emulating register accesses. Sadly, some deployed versions
50 * of said trapping code have bugs wherein they go horribly wrong for stores
51 * using r31 (i.e. XZR/WZR) as the source register.
52 */
53#define QCOM_DUMMY_VAL -1
54
Rob Clark2b037742017-08-09 10:43:03 -040055#define TLB_LOOP_TIMEOUT 1000000 /* 1s! */
56#define TLB_SPIN_COUNT 10
Will Deacon45ae7cf2013-06-24 18:31:25 +010057
Eric Augerf3ebee82017-01-19 20:57:55 +000058#define MSI_IOVA_BASE 0x8000000
59#define MSI_IOVA_LENGTH 0x100000
60
Will Deacon4cf740b2014-07-14 19:47:39 +010061static int force_stage;
Robin Murphy25a1c962016-02-10 14:25:33 +000062module_param(force_stage, int, S_IRUGO);
Will Deacon4cf740b2014-07-14 19:47:39 +010063MODULE_PARM_DESC(force_stage,
64 "Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
Douglas Anderson954a03b2019-03-01 11:20:17 -080065static bool disable_bypass =
66 IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT);
Robin Murphy25a1c962016-02-10 14:25:33 +000067module_param(disable_bypass, bool, S_IRUGO);
68MODULE_PARM_DESC(disable_bypass,
69 "Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
Will Deacon4cf740b2014-07-14 19:47:39 +010070
Robin Murphy8e8b2032016-09-12 17:13:50 +010071struct arm_smmu_s2cr {
Robin Murphy588888a2016-09-12 17:13:54 +010072 struct iommu_group *group;
73 int count;
Robin Murphy8e8b2032016-09-12 17:13:50 +010074 enum arm_smmu_s2cr_type type;
75 enum arm_smmu_s2cr_privcfg privcfg;
76 u8 cbndx;
77};
78
79#define s2cr_init_val (struct arm_smmu_s2cr){ \
80 .type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS, \
81}
82
Will Deacon45ae7cf2013-06-24 18:31:25 +010083struct arm_smmu_smr {
Will Deacon45ae7cf2013-06-24 18:31:25 +010084 u16 mask;
85 u16 id;
Robin Murphy1f3d5ca2016-09-12 17:13:49 +010086 bool valid;
Will Deacon45ae7cf2013-06-24 18:31:25 +010087};
88
Robin Murphy90df3732017-08-08 14:56:14 +010089struct arm_smmu_cb {
90 u64 ttbr[2];
91 u32 tcr[2];
92 u32 mair[2];
93 struct arm_smmu_cfg *cfg;
94};
95
Will Deacona9a1b0b2014-05-01 18:05:08 +010096struct arm_smmu_master_cfg {
Robin Murphyf80cd882016-09-14 15:21:39 +010097 struct arm_smmu_device *smmu;
Robin Murphyadfec2e2016-09-12 17:13:55 +010098 s16 smendx[];
Will Deacon45ae7cf2013-06-24 18:31:25 +010099};
Robin Murphy1f3d5ca2016-09-12 17:13:49 +0100100#define INVALID_SMENDX -1
Robin Murphy24651702020-03-26 16:08:35 +0100101#define cfg_smendx(cfg, fw, i) \
102 (i >= fw->num_ids ? INVALID_SMENDX : cfg->smendx[i])
103#define for_each_cfg_sme(cfg, fw, i, idx) \
104 for (i = 0; idx = cfg_smendx(cfg, fw, i), i < fw->num_ids; ++i)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100105
Robin Murphy021bb842016-09-14 15:26:46 +0100106static bool using_legacy_binding, using_generic_binding;
107
Sricharan Rd4a44f02018-12-04 11:52:10 +0530108static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
109{
110 if (pm_runtime_enabled(smmu->dev))
111 return pm_runtime_get_sync(smmu->dev);
112
113 return 0;
114}
115
116static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
117{
118 if (pm_runtime_enabled(smmu->dev))
Rob Clarkee9bdfe2019-10-31 14:31:02 -0700119 pm_runtime_put_autosuspend(smmu->dev);
Sricharan Rd4a44f02018-12-04 11:52:10 +0530120}
121
Joerg Roedel1d672632015-03-26 13:43:10 +0100122static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
123{
124 return container_of(dom, struct arm_smmu_domain, domain);
125}
126
Will Deaconcd221bd2019-12-19 12:03:51 +0000127static struct platform_driver arm_smmu_driver;
128static struct iommu_ops arm_smmu_ops;
129
130#ifdef CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS
131static int arm_smmu_bus_init(struct iommu_ops *ops);
132
Will Deacon8f68f8e2014-07-15 11:27:08 +0100133static struct device_node *dev_get_dev_node(struct device *dev)
Will Deacona9a1b0b2014-05-01 18:05:08 +0100134{
135 if (dev_is_pci(dev)) {
136 struct pci_bus *bus = to_pci_dev(dev)->bus;
Mitchel Humpherys29073202014-07-08 09:52:18 -0700137
Will Deacona9a1b0b2014-05-01 18:05:08 +0100138 while (!pci_is_root_bus(bus))
139 bus = bus->parent;
Robin Murphyf80cd882016-09-14 15:21:39 +0100140 return of_node_get(bus->bridge->parent->of_node);
Will Deacona9a1b0b2014-05-01 18:05:08 +0100141 }
142
Robin Murphyf80cd882016-09-14 15:21:39 +0100143 return of_node_get(dev->of_node);
Will Deacona9a1b0b2014-05-01 18:05:08 +0100144}
145
Robin Murphyf80cd882016-09-14 15:21:39 +0100146static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100147{
Robin Murphyf80cd882016-09-14 15:21:39 +0100148 *((__be32 *)data) = cpu_to_be32(alias);
149 return 0; /* Continue walking */
Will Deacon45ae7cf2013-06-24 18:31:25 +0100150}
151
Robin Murphyf80cd882016-09-14 15:21:39 +0100152static int __find_legacy_master_phandle(struct device *dev, void *data)
Will Deacona9a1b0b2014-05-01 18:05:08 +0100153{
Robin Murphyf80cd882016-09-14 15:21:39 +0100154 struct of_phandle_iterator *it = *(void **)data;
155 struct device_node *np = it->node;
156 int err;
Will Deacona9a1b0b2014-05-01 18:05:08 +0100157
Robin Murphyf80cd882016-09-14 15:21:39 +0100158 of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
Uwe Kleine-Königc680e9a2019-08-24 15:28:45 +0200159 "#stream-id-cells", -1)
Robin Murphyf80cd882016-09-14 15:21:39 +0100160 if (it->node == np) {
161 *(void **)data = dev;
162 return 1;
Olav Haugan3c8766d2014-08-22 17:12:32 -0700163 }
Robin Murphyf80cd882016-09-14 15:21:39 +0100164 it->node = np;
165 return err == -ENOENT ? 0 : err;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100166}
167
Robin Murphyadfec2e2016-09-12 17:13:55 +0100168static int arm_smmu_register_legacy_master(struct device *dev,
169 struct arm_smmu_device **smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100170{
Robin Murphyadfec2e2016-09-12 17:13:55 +0100171 struct device *smmu_dev;
Robin Murphyf80cd882016-09-14 15:21:39 +0100172 struct device_node *np;
173 struct of_phandle_iterator it;
174 void *data = &it;
Robin Murphyadfec2e2016-09-12 17:13:55 +0100175 u32 *sids;
Robin Murphyf80cd882016-09-14 15:21:39 +0100176 __be32 pci_sid;
177 int err;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100178
Robin Murphyf80cd882016-09-14 15:21:39 +0100179 np = dev_get_dev_node(dev);
180 if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
181 of_node_put(np);
182 return -ENODEV;
183 }
184
185 it.node = np;
Robin Murphyd6fc5d92016-09-12 17:13:52 +0100186 err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
187 __find_legacy_master_phandle);
Robin Murphyadfec2e2016-09-12 17:13:55 +0100188 smmu_dev = data;
Robin Murphyf80cd882016-09-14 15:21:39 +0100189 of_node_put(np);
190 if (err == 0)
191 return -ENODEV;
192 if (err < 0)
193 return err;
Will Deacon44680ee2014-06-25 11:29:12 +0100194
Robin Murphyf80cd882016-09-14 15:21:39 +0100195 if (dev_is_pci(dev)) {
196 /* "mmu-masters" assumes Stream ID == Requester ID */
197 pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
198 &pci_sid);
199 it.cur = &pci_sid;
200 it.cur_count = 1;
201 }
202
Robin Murphyadfec2e2016-09-12 17:13:55 +0100203 err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
204 &arm_smmu_ops);
205 if (err)
206 return err;
207
208 sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
209 if (!sids)
Robin Murphyf80cd882016-09-14 15:21:39 +0100210 return -ENOMEM;
211
Robin Murphyadfec2e2016-09-12 17:13:55 +0100212 *smmu = dev_get_drvdata(smmu_dev);
213 of_phandle_iterator_args(&it, sids, it.cur_count);
214 err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
215 kfree(sids);
216 return err;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100217}
218
Will Deaconcd221bd2019-12-19 12:03:51 +0000219/*
220 * With the legacy DT binding in play, we have no guarantees about
221 * probe order, but then we're also not doing default domains, so we can
222 * delay setting bus ops until we're sure every possible SMMU is ready,
Joerg Roedelcefa0d52020-04-29 15:36:55 +0200223 * and that way ensure that no probe_device() calls get missed.
Will Deaconcd221bd2019-12-19 12:03:51 +0000224 */
225static int arm_smmu_legacy_bus_init(void)
226{
227 if (using_legacy_binding)
228 return arm_smmu_bus_init(&arm_smmu_ops);
229 return 0;
230}
231device_initcall_sync(arm_smmu_legacy_bus_init);
232#else
233static int arm_smmu_register_legacy_master(struct device *dev,
234 struct arm_smmu_device **smmu)
235{
236 return -ENODEV;
237}
238#endif /* CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS */
239
Will Deacon45ae7cf2013-06-24 18:31:25 +0100240static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
241{
242 int idx;
243
244 do {
245 idx = find_next_zero_bit(map, end, start);
246 if (idx == end)
247 return -ENOSPC;
248 } while (test_and_set_bit(idx, map));
249
250 return idx;
251}
252
253static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
254{
255 clear_bit(idx, map);
256}
257
258/* Wait for any pending TLB invalidations to complete */
Robin Murphy19713fd2019-08-15 19:37:30 +0100259static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
260 int sync, int status)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100261{
Robin Murphy8513c892017-03-30 17:56:32 +0100262 unsigned int spin_cnt, delay;
Robin Murphy19713fd2019-08-15 19:37:30 +0100263 u32 reg;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100264
Robin Murphyae2b60f2019-09-18 17:17:50 +0100265 if (smmu->impl && unlikely(smmu->impl->tlb_sync))
266 return smmu->impl->tlb_sync(smmu, page, sync, status);
267
Robin Murphy19713fd2019-08-15 19:37:30 +0100268 arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL);
Robin Murphy8513c892017-03-30 17:56:32 +0100269 for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
270 for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
Robin Murphy19713fd2019-08-15 19:37:30 +0100271 reg = arm_smmu_readl(smmu, page, status);
Will Deaconfba6e962020-01-10 13:20:03 +0000272 if (!(reg & ARM_SMMU_sTLBGSTATUS_GSACTIVE))
Robin Murphy8513c892017-03-30 17:56:32 +0100273 return;
274 cpu_relax();
Will Deacon45ae7cf2013-06-24 18:31:25 +0100275 }
Robin Murphy8513c892017-03-30 17:56:32 +0100276 udelay(delay);
Will Deacon45ae7cf2013-06-24 18:31:25 +0100277 }
Robin Murphy8513c892017-03-30 17:56:32 +0100278 dev_err_ratelimited(smmu->dev,
279 "TLB sync timed out -- SMMU may be deadlocked\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +0100280}
281
Robin Murphy11febfc2017-03-30 17:56:31 +0100282static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
Will Deacon1463fe42013-07-31 19:21:27 +0100283{
Will Deacon8e517e72017-07-06 15:55:48 +0100284 unsigned long flags;
Robin Murphy11febfc2017-03-30 17:56:31 +0100285
Will Deacon8e517e72017-07-06 15:55:48 +0100286 spin_lock_irqsave(&smmu->global_sync_lock, flags);
Robin Murphy00320ce2019-08-15 19:37:31 +0100287 __arm_smmu_tlb_sync(smmu, ARM_SMMU_GR0, ARM_SMMU_GR0_sTLBGSYNC,
Robin Murphy19713fd2019-08-15 19:37:30 +0100288 ARM_SMMU_GR0_sTLBGSTATUS);
Will Deacon8e517e72017-07-06 15:55:48 +0100289 spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
Will Deacon518f7132014-11-14 17:17:54 +0000290}
291
Robin Murphyae2b60f2019-09-18 17:17:50 +0100292static void arm_smmu_tlb_sync_context(struct arm_smmu_domain *smmu_domain)
Will Deacon1463fe42013-07-31 19:21:27 +0100293{
Robin Murphy11febfc2017-03-30 17:56:31 +0100294 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon8e517e72017-07-06 15:55:48 +0100295 unsigned long flags;
Robin Murphy11febfc2017-03-30 17:56:31 +0100296
Will Deacon8e517e72017-07-06 15:55:48 +0100297 spin_lock_irqsave(&smmu_domain->cb_lock, flags);
Robin Murphy19713fd2019-08-15 19:37:30 +0100298 __arm_smmu_tlb_sync(smmu, ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx),
299 ARM_SMMU_CB_TLBSYNC, ARM_SMMU_CB_TLBSTATUS);
Will Deacon8e517e72017-07-06 15:55:48 +0100300 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
Will Deacon518f7132014-11-14 17:17:54 +0000301}
302
Robin Murphy11febfc2017-03-30 17:56:31 +0100303static void arm_smmu_tlb_inv_context_s1(void *cookie)
Will Deacon518f7132014-11-14 17:17:54 +0000304{
305 struct arm_smmu_domain *smmu_domain = cookie;
Robin Murphy44f68762018-09-20 17:10:27 +0100306 /*
Robin Murphy19713fd2019-08-15 19:37:30 +0100307 * The TLBI write may be relaxed, so ensure that PTEs cleared by the
308 * current CPU are visible beforehand.
Robin Murphy44f68762018-09-20 17:10:27 +0100309 */
Robin Murphy19713fd2019-08-15 19:37:30 +0100310 wmb();
311 arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx,
312 ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid);
Robin Murphyae2b60f2019-09-18 17:17:50 +0100313 arm_smmu_tlb_sync_context(smmu_domain);
Robin Murphy11febfc2017-03-30 17:56:31 +0100314}
315
316static void arm_smmu_tlb_inv_context_s2(void *cookie)
317{
318 struct arm_smmu_domain *smmu_domain = cookie;
Will Deacon44680ee2014-06-25 11:29:12 +0100319 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon1463fe42013-07-31 19:21:27 +0100320
Robin Murphy00320ce2019-08-15 19:37:31 +0100321 /* See above */
322 wmb();
323 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
Robin Murphy11febfc2017-03-30 17:56:31 +0100324 arm_smmu_tlb_sync_global(smmu);
Will Deacon1463fe42013-07-31 19:21:27 +0100325}
326
Robin Murphy71e8a8c2019-08-15 19:37:27 +0100327static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size,
Robin Murphy3370cb62019-09-18 17:17:49 +0100328 size_t granule, void *cookie, int reg)
Will Deacon518f7132014-11-14 17:17:54 +0000329{
330 struct arm_smmu_domain *smmu_domain = cookie;
Robin Murphy71e8a8c2019-08-15 19:37:27 +0100331 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon518f7132014-11-14 17:17:54 +0000332 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Robin Murphy3370cb62019-09-18 17:17:49 +0100333 int idx = cfg->cbndx;
Will Deacon518f7132014-11-14 17:17:54 +0000334
Robin Murphy71e8a8c2019-08-15 19:37:27 +0100335 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
Will Deacon7d321bd32018-10-01 12:42:49 +0100336 wmb();
337
Robin Murphy71e8a8c2019-08-15 19:37:27 +0100338 if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
339 iova = (iova >> 12) << 12;
340 iova |= cfg->asid;
Robin Murphy75df1382015-12-07 18:18:52 +0000341 do {
Robin Murphy19713fd2019-08-15 19:37:30 +0100342 arm_smmu_cb_write(smmu, idx, reg, iova);
Robin Murphy71e8a8c2019-08-15 19:37:27 +0100343 iova += granule;
344 } while (size -= granule);
345 } else {
346 iova >>= 12;
347 iova |= (u64)cfg->asid << 48;
348 do {
Robin Murphy19713fd2019-08-15 19:37:30 +0100349 arm_smmu_cb_writeq(smmu, idx, reg, iova);
Robin Murphy75df1382015-12-07 18:18:52 +0000350 iova += granule >> 12;
351 } while (size -= granule);
Will Deacon518f7132014-11-14 17:17:54 +0000352 }
353}
354
Robin Murphy71e8a8c2019-08-15 19:37:27 +0100355static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
Robin Murphy3370cb62019-09-18 17:17:49 +0100356 size_t granule, void *cookie, int reg)
Robin Murphy71e8a8c2019-08-15 19:37:27 +0100357{
358 struct arm_smmu_domain *smmu_domain = cookie;
359 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy3370cb62019-09-18 17:17:49 +0100360 int idx = smmu_domain->cfg.cbndx;
Robin Murphy71e8a8c2019-08-15 19:37:27 +0100361
362 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
363 wmb();
364
Robin Murphy71e8a8c2019-08-15 19:37:27 +0100365 iova >>= 12;
366 do {
Robin Murphy61005762019-08-15 19:37:28 +0100367 if (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64)
Robin Murphy19713fd2019-08-15 19:37:30 +0100368 arm_smmu_cb_writeq(smmu, idx, reg, iova);
Robin Murphy61005762019-08-15 19:37:28 +0100369 else
Robin Murphy19713fd2019-08-15 19:37:30 +0100370 arm_smmu_cb_write(smmu, idx, reg, iova);
Robin Murphy71e8a8c2019-08-15 19:37:27 +0100371 iova += granule >> 12;
372 } while (size -= granule);
373}
374
Robin Murphy3f3b8d02019-09-18 17:17:48 +0100375static void arm_smmu_tlb_inv_walk_s1(unsigned long iova, size_t size,
376 size_t granule, void *cookie)
377{
Robin Murphy3370cb62019-09-18 17:17:49 +0100378 arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie,
379 ARM_SMMU_CB_S1_TLBIVA);
Robin Murphy3f3b8d02019-09-18 17:17:48 +0100380 arm_smmu_tlb_sync_context(cookie);
381}
382
383static void arm_smmu_tlb_inv_leaf_s1(unsigned long iova, size_t size,
384 size_t granule, void *cookie)
385{
Robin Murphy3370cb62019-09-18 17:17:49 +0100386 arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie,
387 ARM_SMMU_CB_S1_TLBIVAL);
Robin Murphy3f3b8d02019-09-18 17:17:48 +0100388 arm_smmu_tlb_sync_context(cookie);
389}
390
391static void arm_smmu_tlb_add_page_s1(struct iommu_iotlb_gather *gather,
392 unsigned long iova, size_t granule,
393 void *cookie)
394{
Robin Murphy3370cb62019-09-18 17:17:49 +0100395 arm_smmu_tlb_inv_range_s1(iova, granule, granule, cookie,
396 ARM_SMMU_CB_S1_TLBIVAL);
Robin Murphy3f3b8d02019-09-18 17:17:48 +0100397}
398
399static void arm_smmu_tlb_inv_walk_s2(unsigned long iova, size_t size,
400 size_t granule, void *cookie)
401{
Robin Murphy3370cb62019-09-18 17:17:49 +0100402 arm_smmu_tlb_inv_range_s2(iova, size, granule, cookie,
403 ARM_SMMU_CB_S2_TLBIIPAS2);
Robin Murphy3f3b8d02019-09-18 17:17:48 +0100404 arm_smmu_tlb_sync_context(cookie);
405}
406
407static void arm_smmu_tlb_inv_leaf_s2(unsigned long iova, size_t size,
408 size_t granule, void *cookie)
409{
Robin Murphy3370cb62019-09-18 17:17:49 +0100410 arm_smmu_tlb_inv_range_s2(iova, size, granule, cookie,
411 ARM_SMMU_CB_S2_TLBIIPAS2L);
Robin Murphy3f3b8d02019-09-18 17:17:48 +0100412 arm_smmu_tlb_sync_context(cookie);
413}
414
415static void arm_smmu_tlb_add_page_s2(struct iommu_iotlb_gather *gather,
416 unsigned long iova, size_t granule,
417 void *cookie)
418{
Robin Murphy3370cb62019-09-18 17:17:49 +0100419 arm_smmu_tlb_inv_range_s2(iova, granule, granule, cookie,
420 ARM_SMMU_CB_S2_TLBIIPAS2L);
Robin Murphy3f3b8d02019-09-18 17:17:48 +0100421}
422
423static void arm_smmu_tlb_inv_any_s2_v1(unsigned long iova, size_t size,
424 size_t granule, void *cookie)
425{
426 arm_smmu_tlb_inv_context_s2(cookie);
427}
Robin Murphy11febfc2017-03-30 17:56:31 +0100428/*
429 * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
430 * almost negligible, but the benefit of getting the first one in as far ahead
431 * of the sync as possible is significant, hence we don't just make this a
Robin Murphy3f3b8d02019-09-18 17:17:48 +0100432 * no-op and call arm_smmu_tlb_inv_context_s2() from .iotlb_sync as you might
433 * think.
Robin Murphy11febfc2017-03-30 17:56:31 +0100434 */
Robin Murphy3f3b8d02019-09-18 17:17:48 +0100435static void arm_smmu_tlb_add_page_s2_v1(struct iommu_iotlb_gather *gather,
436 unsigned long iova, size_t granule,
437 void *cookie)
Robin Murphy11febfc2017-03-30 17:56:31 +0100438{
439 struct arm_smmu_domain *smmu_domain = cookie;
Robin Murphy00320ce2019-08-15 19:37:31 +0100440 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy11febfc2017-03-30 17:56:31 +0100441
Robin Murphy00320ce2019-08-15 19:37:31 +0100442 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
Will Deacon7d321bd32018-10-01 12:42:49 +0100443 wmb();
444
Robin Murphy00320ce2019-08-15 19:37:31 +0100445 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
Robin Murphy11febfc2017-03-30 17:56:31 +0100446}
447
Robin Murphy696bcfb2019-09-18 17:17:51 +0100448static const struct iommu_flush_ops arm_smmu_s1_tlb_ops = {
449 .tlb_flush_all = arm_smmu_tlb_inv_context_s1,
450 .tlb_flush_walk = arm_smmu_tlb_inv_walk_s1,
451 .tlb_flush_leaf = arm_smmu_tlb_inv_leaf_s1,
452 .tlb_add_page = arm_smmu_tlb_add_page_s1,
Robin Murphy11febfc2017-03-30 17:56:31 +0100453};
454
Robin Murphy696bcfb2019-09-18 17:17:51 +0100455static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
456 .tlb_flush_all = arm_smmu_tlb_inv_context_s2,
457 .tlb_flush_walk = arm_smmu_tlb_inv_walk_s2,
458 .tlb_flush_leaf = arm_smmu_tlb_inv_leaf_s2,
459 .tlb_add_page = arm_smmu_tlb_add_page_s2,
Robin Murphy11febfc2017-03-30 17:56:31 +0100460};
461
Robin Murphy696bcfb2019-09-18 17:17:51 +0100462static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
463 .tlb_flush_all = arm_smmu_tlb_inv_context_s2,
464 .tlb_flush_walk = arm_smmu_tlb_inv_any_s2_v1,
465 .tlb_flush_leaf = arm_smmu_tlb_inv_any_s2_v1,
466 .tlb_add_page = arm_smmu_tlb_add_page_s2_v1,
Will Deacon518f7132014-11-14 17:17:54 +0000467};
468
Will Deacon45ae7cf2013-06-24 18:31:25 +0100469static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
470{
Vivek Gautambc580b52019-04-22 12:40:36 +0530471 u32 fsr, fsynr, cbfrsynra;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100472 unsigned long iova;
473 struct iommu_domain *domain = dev;
Joerg Roedel1d672632015-03-26 13:43:10 +0100474 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +0100475 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy19713fd2019-08-15 19:37:30 +0100476 int idx = smmu_domain->cfg.cbndx;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100477
Robin Murphy19713fd2019-08-15 19:37:30 +0100478 fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
Will Deaconfba6e962020-01-10 13:20:03 +0000479 if (!(fsr & ARM_SMMU_FSR_FAULT))
Will Deacon45ae7cf2013-06-24 18:31:25 +0100480 return IRQ_NONE;
481
Robin Murphy19713fd2019-08-15 19:37:30 +0100482 fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0);
483 iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);
484 cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));
Will Deacon45ae7cf2013-06-24 18:31:25 +0100485
Will Deacon3714ce1d2016-08-05 19:49:45 +0100486 dev_err_ratelimited(smmu->dev,
Vivek Gautambc580b52019-04-22 12:40:36 +0530487 "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
Robin Murphy19713fd2019-08-15 19:37:30 +0100488 fsr, iova, fsynr, cbfrsynra, idx);
Will Deacon3714ce1d2016-08-05 19:49:45 +0100489
Robin Murphy19713fd2019-08-15 19:37:30 +0100490 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
Will Deacon3714ce1d2016-08-05 19:49:45 +0100491 return IRQ_HANDLED;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100492}
493
494static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
495{
496 u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
497 struct arm_smmu_device *smmu = dev;
Robin Murphy931a0ba2019-09-17 15:45:34 +0100498 static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
499 DEFAULT_RATELIMIT_BURST);
Will Deacon45ae7cf2013-06-24 18:31:25 +0100500
Robin Murphy00320ce2019-08-15 19:37:31 +0100501 gfsr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
502 gfsynr0 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR0);
503 gfsynr1 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR1);
504 gfsynr2 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR2);
Will Deacon45ae7cf2013-06-24 18:31:25 +0100505
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000506 if (!gfsr)
507 return IRQ_NONE;
508
Robin Murphy931a0ba2019-09-17 15:45:34 +0100509 if (__ratelimit(&rs)) {
510 if (IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT) &&
Will Deaconfba6e962020-01-10 13:20:03 +0000511 (gfsr & ARM_SMMU_sGFSR_USF))
Robin Murphy931a0ba2019-09-17 15:45:34 +0100512 dev_err(smmu->dev,
513 "Blocked unknown Stream ID 0x%hx; boot with \"arm-smmu.disable_bypass=0\" to allow, but this may have security implications\n",
514 (u16)gfsynr1);
515 else
516 dev_err(smmu->dev,
517 "Unexpected global fault, this could be serious\n");
518 dev_err(smmu->dev,
519 "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
520 gfsr, gfsynr0, gfsynr1, gfsynr2);
521 }
Will Deacon45ae7cf2013-06-24 18:31:25 +0100522
Robin Murphy00320ce2019-08-15 19:37:31 +0100523 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, gfsr);
Will Deaconadaba322013-07-31 19:21:26 +0100524 return IRQ_HANDLED;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100525}
526
Will Deacon518f7132014-11-14 17:17:54 +0000527static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
528 struct io_pgtable_cfg *pgtbl_cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100529{
Will Deacon44680ee2014-06-25 11:29:12 +0100530 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Robin Murphy90df3732017-08-08 14:56:14 +0100531 struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
532 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
533
534 cb->cfg = cfg;
535
Robin Murphy620565a2019-08-15 19:37:25 +0100536 /* TCR */
Robin Murphy90df3732017-08-08 14:56:14 +0100537 if (stage1) {
538 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
539 cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr;
540 } else {
Robin Murphyfb485eb2019-10-25 19:08:38 +0100541 cb->tcr[0] = arm_smmu_lpae_tcr(pgtbl_cfg);
542 cb->tcr[1] = arm_smmu_lpae_tcr2(pgtbl_cfg);
Robin Murphy90df3732017-08-08 14:56:14 +0100543 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
Will Deaconfba6e962020-01-10 13:20:03 +0000544 cb->tcr[1] |= ARM_SMMU_TCR2_AS;
Robin Murphyfb485eb2019-10-25 19:08:38 +0100545 else
Will Deaconfba6e962020-01-10 13:20:03 +0000546 cb->tcr[0] |= ARM_SMMU_TCR_EAE;
Robin Murphy90df3732017-08-08 14:56:14 +0100547 }
548 } else {
Will Deaconac4b80e2020-01-10 14:51:59 +0000549 cb->tcr[0] = arm_smmu_lpae_vtcr(pgtbl_cfg);
Robin Murphy90df3732017-08-08 14:56:14 +0100550 }
551
552 /* TTBRs */
553 if (stage1) {
554 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
Robin Murphyd1e5f262019-10-25 19:08:37 +0100555 cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr;
556 cb->ttbr[1] = 0;
Robin Murphy90df3732017-08-08 14:56:14 +0100557 } else {
Robin Murphyd1e5f262019-10-25 19:08:37 +0100558 cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
Will Deaconfba6e962020-01-10 13:20:03 +0000559 cb->ttbr[0] |= FIELD_PREP(ARM_SMMU_TTBRn_ASID,
560 cfg->asid);
561 cb->ttbr[1] = FIELD_PREP(ARM_SMMU_TTBRn_ASID,
562 cfg->asid);
Robin Murphy90df3732017-08-08 14:56:14 +0100563 }
564 } else {
565 cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
566 }
567
568 /* MAIRs (stage-1 only) */
569 if (stage1) {
570 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
571 cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr;
572 cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr;
573 } else {
Robin Murphy205577a2019-10-25 19:08:36 +0100574 cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair;
575 cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair >> 32;
Robin Murphy90df3732017-08-08 14:56:14 +0100576 }
577 }
578}
579
580static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
581{
582 u32 reg;
583 bool stage1;
584 struct arm_smmu_cb *cb = &smmu->cbs[idx];
585 struct arm_smmu_cfg *cfg = cb->cfg;
Robin Murphy90df3732017-08-08 14:56:14 +0100586
587 /* Unassigned context banks only need disabling */
588 if (!cfg) {
Robin Murphy19713fd2019-08-15 19:37:30 +0100589 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, 0);
Robin Murphy90df3732017-08-08 14:56:14 +0100590 return;
591 }
592
Will Deacon44680ee2014-06-25 11:29:12 +0100593 stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100594
Robin Murphy90df3732017-08-08 14:56:14 +0100595 /* CBA2R */
Will Deacon4a1c93c2015-03-04 12:21:03 +0000596 if (smmu->version > ARM_SMMU_V1) {
Robin Murphy7602b872016-04-28 17:12:09 +0100597 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
Will Deaconfba6e962020-01-10 13:20:03 +0000598 reg = ARM_SMMU_CBA2R_VA64;
Robin Murphy7602b872016-04-28 17:12:09 +0100599 else
Robin Murphy5114e962019-08-15 19:37:24 +0100600 reg = 0;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -0800601 /* 16-bit VMIDs live in CBA2R */
602 if (smmu->features & ARM_SMMU_FEAT_VMID16)
Will Deaconfba6e962020-01-10 13:20:03 +0000603 reg |= FIELD_PREP(ARM_SMMU_CBA2R_VMID16, cfg->vmid);
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -0800604
Robin Murphyaadbf212019-08-15 19:37:29 +0100605 arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBA2R(idx), reg);
Will Deacon4a1c93c2015-03-04 12:21:03 +0000606 }
607
Will Deacon45ae7cf2013-06-24 18:31:25 +0100608 /* CBAR */
Will Deaconfba6e962020-01-10 13:20:03 +0000609 reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, cfg->cbar);
Robin Murphyb7862e32016-04-13 18:13:03 +0100610 if (smmu->version < ARM_SMMU_V2)
Will Deaconfba6e962020-01-10 13:20:03 +0000611 reg |= FIELD_PREP(ARM_SMMU_CBAR_IRPTNDX, cfg->irptndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +0100612
Will Deacon57ca90f2014-02-06 14:59:05 +0000613 /*
614 * Use the weakest shareability/memory types, so they are
615 * overridden by the ttbcr/pte.
616 */
617 if (stage1) {
Will Deaconfba6e962020-01-10 13:20:03 +0000618 reg |= FIELD_PREP(ARM_SMMU_CBAR_S1_BPSHCFG,
619 ARM_SMMU_CBAR_S1_BPSHCFG_NSH) |
620 FIELD_PREP(ARM_SMMU_CBAR_S1_MEMATTR,
621 ARM_SMMU_CBAR_S1_MEMATTR_WB);
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -0800622 } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
623 /* 8-bit VMIDs live in CBAR */
Will Deaconfba6e962020-01-10 13:20:03 +0000624 reg |= FIELD_PREP(ARM_SMMU_CBAR_VMID, cfg->vmid);
Will Deacon57ca90f2014-02-06 14:59:05 +0000625 }
Robin Murphyaadbf212019-08-15 19:37:29 +0100626 arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(idx), reg);
Will Deacon45ae7cf2013-06-24 18:31:25 +0100627
Sunil Goutham125458a2017-03-28 16:11:12 +0530628 /*
Robin Murphy620565a2019-08-15 19:37:25 +0100629 * TCR
Sunil Goutham125458a2017-03-28 16:11:12 +0530630 * We must write this before the TTBRs, since it determines the
631 * access behaviour of some fields (in particular, ASID[15:8]).
632 */
Robin Murphy90df3732017-08-08 14:56:14 +0100633 if (stage1 && smmu->version > ARM_SMMU_V1)
Robin Murphy19713fd2019-08-15 19:37:30 +0100634 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR2, cb->tcr[1]);
635 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR, cb->tcr[0]);
Will Deacon45ae7cf2013-06-24 18:31:25 +0100636
Will Deacon45ae7cf2013-06-24 18:31:25 +0100637 /* TTBRs */
Robin Murphy90df3732017-08-08 14:56:14 +0100638 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
Robin Murphy19713fd2019-08-15 19:37:30 +0100639 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_CONTEXTIDR, cfg->asid);
640 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
641 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR1, cb->ttbr[1]);
Will Deacon45ae7cf2013-06-24 18:31:25 +0100642 } else {
Robin Murphy19713fd2019-08-15 19:37:30 +0100643 arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
Robin Murphy90df3732017-08-08 14:56:14 +0100644 if (stage1)
Robin Murphy19713fd2019-08-15 19:37:30 +0100645 arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR1,
646 cb->ttbr[1]);
Will Deacon45ae7cf2013-06-24 18:31:25 +0100647 }
648
Will Deacon518f7132014-11-14 17:17:54 +0000649 /* MAIRs (stage-1 only) */
Will Deacon45ae7cf2013-06-24 18:31:25 +0100650 if (stage1) {
Robin Murphy19713fd2019-08-15 19:37:30 +0100651 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR0, cb->mair[0]);
652 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR1, cb->mair[1]);
Will Deacon45ae7cf2013-06-24 18:31:25 +0100653 }
654
Will Deacon45ae7cf2013-06-24 18:31:25 +0100655 /* SCTLR */
Will Deaconfba6e962020-01-10 13:20:03 +0000656 reg = ARM_SMMU_SCTLR_CFIE | ARM_SMMU_SCTLR_CFRE | ARM_SMMU_SCTLR_AFE |
657 ARM_SMMU_SCTLR_TRE | ARM_SMMU_SCTLR_M;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100658 if (stage1)
Will Deaconfba6e962020-01-10 13:20:03 +0000659 reg |= ARM_SMMU_SCTLR_S1_ASIDPNE;
Robin Murphy90df3732017-08-08 14:56:14 +0100660 if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
Will Deaconfba6e962020-01-10 13:20:03 +0000661 reg |= ARM_SMMU_SCTLR_E;
Robin Murphy90df3732017-08-08 14:56:14 +0100662
Robin Murphy19713fd2019-08-15 19:37:30 +0100663 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
Will Deacon45ae7cf2013-06-24 18:31:25 +0100664}
665
666static int arm_smmu_init_domain_context(struct iommu_domain *domain,
Will Deacon44680ee2014-06-25 11:29:12 +0100667 struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100668{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +0100669 int irq, start, ret = 0;
Will Deacon518f7132014-11-14 17:17:54 +0000670 unsigned long ias, oas;
671 struct io_pgtable_ops *pgtbl_ops;
672 struct io_pgtable_cfg pgtbl_cfg;
673 enum io_pgtable_fmt fmt;
Joerg Roedel1d672632015-03-26 13:43:10 +0100674 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +0100675 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100676
Will Deacon518f7132014-11-14 17:17:54 +0000677 mutex_lock(&smmu_domain->init_mutex);
Mitchel Humpherysa18037b2014-07-30 18:58:13 +0100678 if (smmu_domain->smmu)
679 goto out_unlock;
680
Will Deacon61bc6712017-01-06 16:56:03 +0000681 if (domain->type == IOMMU_DOMAIN_IDENTITY) {
682 smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
683 smmu_domain->smmu = smmu;
684 goto out_unlock;
685 }
686
Will Deaconc752ce42014-06-25 22:46:31 +0100687 /*
688 * Mapping the requested stage onto what we support is surprisingly
689 * complicated, mainly because the spec allows S1+S2 SMMUs without
690 * support for nested translation. That means we end up with the
691 * following table:
692 *
693 * Requested Supported Actual
694 * S1 N S1
695 * S1 S1+S2 S1
696 * S1 S2 S2
697 * S1 S1 S1
698 * N N N
699 * N S1+S2 S2
700 * N S2 S2
701 * N S1 S1
702 *
703 * Note that you can't actually request stage-2 mappings.
704 */
705 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
706 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
707 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
708 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
709
Robin Murphy7602b872016-04-28 17:12:09 +0100710 /*
711 * Choosing a suitable context format is even more fiddly. Until we
712 * grow some way for the caller to express a preference, and/or move
713 * the decision into the io-pgtable code where it arguably belongs,
714 * just aim for the closest thing to the rest of the system, and hope
715 * that the hardware isn't esoteric enough that we can't assume AArch64
716 * support to be a superset of AArch32 support...
717 */
718 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
719 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
Robin Murphy60705292016-08-11 17:44:06 +0100720 if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
721 !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
722 (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
723 (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
724 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
Robin Murphy7602b872016-04-28 17:12:09 +0100725 if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
726 (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
727 ARM_SMMU_FEAT_FMT_AARCH64_16K |
728 ARM_SMMU_FEAT_FMT_AARCH64_4K)))
729 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
730
731 if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
732 ret = -EINVAL;
733 goto out_unlock;
734 }
735
Will Deaconc752ce42014-06-25 22:46:31 +0100736 switch (smmu_domain->stage) {
737 case ARM_SMMU_DOMAIN_S1:
738 cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
739 start = smmu->num_s2_context_banks;
Will Deacon518f7132014-11-14 17:17:54 +0000740 ias = smmu->va_size;
741 oas = smmu->ipa_size;
Robin Murphy7602b872016-04-28 17:12:09 +0100742 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +0000743 fmt = ARM_64_LPAE_S1;
Robin Murphy60705292016-08-11 17:44:06 +0100744 } else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
Will Deacon518f7132014-11-14 17:17:54 +0000745 fmt = ARM_32_LPAE_S1;
Robin Murphy7602b872016-04-28 17:12:09 +0100746 ias = min(ias, 32UL);
747 oas = min(oas, 40UL);
Robin Murphy60705292016-08-11 17:44:06 +0100748 } else {
749 fmt = ARM_V7S;
750 ias = min(ias, 32UL);
751 oas = min(oas, 32UL);
Robin Murphy7602b872016-04-28 17:12:09 +0100752 }
Will Deaconabfd6fe2019-07-02 16:44:41 +0100753 smmu_domain->flush_ops = &arm_smmu_s1_tlb_ops;
Will Deaconc752ce42014-06-25 22:46:31 +0100754 break;
755 case ARM_SMMU_DOMAIN_NESTED:
Will Deacon45ae7cf2013-06-24 18:31:25 +0100756 /*
757 * We will likely want to change this if/when KVM gets
758 * involved.
759 */
Will Deaconc752ce42014-06-25 22:46:31 +0100760 case ARM_SMMU_DOMAIN_S2:
Will Deacon9c5c92e2014-06-25 12:12:41 +0100761 cfg->cbar = CBAR_TYPE_S2_TRANS;
762 start = 0;
Will Deacon518f7132014-11-14 17:17:54 +0000763 ias = smmu->ipa_size;
764 oas = smmu->pa_size;
Robin Murphy7602b872016-04-28 17:12:09 +0100765 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +0000766 fmt = ARM_64_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +0100767 } else {
Will Deacon518f7132014-11-14 17:17:54 +0000768 fmt = ARM_32_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +0100769 ias = min(ias, 40UL);
770 oas = min(oas, 40UL);
771 }
Robin Murphy11febfc2017-03-30 17:56:31 +0100772 if (smmu->version == ARM_SMMU_V2)
Will Deaconabfd6fe2019-07-02 16:44:41 +0100773 smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v2;
Robin Murphy11febfc2017-03-30 17:56:31 +0100774 else
Will Deaconabfd6fe2019-07-02 16:44:41 +0100775 smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v1;
Will Deaconc752ce42014-06-25 22:46:31 +0100776 break;
777 default:
778 ret = -EINVAL;
779 goto out_unlock;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100780 }
Will Deacon45ae7cf2013-06-24 18:31:25 +0100781 ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
782 smmu->num_context_banks);
Arnd Bergmann287980e2016-05-27 23:23:25 +0200783 if (ret < 0)
Mitchel Humpherysa18037b2014-07-30 18:58:13 +0100784 goto out_unlock;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100785
Will Deacon44680ee2014-06-25 11:29:12 +0100786 cfg->cbndx = ret;
Robin Murphyb7862e32016-04-13 18:13:03 +0100787 if (smmu->version < ARM_SMMU_V2) {
Will Deacon44680ee2014-06-25 11:29:12 +0100788 cfg->irptndx = atomic_inc_return(&smmu->irptndx);
789 cfg->irptndx %= smmu->num_context_irqs;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100790 } else {
Will Deacon44680ee2014-06-25 11:29:12 +0100791 cfg->irptndx = cfg->cbndx;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100792 }
793
Robin Murphy280b6832017-03-30 17:56:29 +0100794 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
Robin Murphyba7e4a02019-08-15 19:37:37 +0100795 cfg->vmid = cfg->cbndx + 1;
Robin Murphy280b6832017-03-30 17:56:29 +0100796 else
Robin Murphyba7e4a02019-08-15 19:37:37 +0100797 cfg->asid = cfg->cbndx;
798
799 smmu_domain->smmu = smmu;
800 if (smmu->impl && smmu->impl->init_context) {
801 ret = smmu->impl->init_context(smmu_domain);
802 if (ret)
803 goto out_unlock;
804 }
Robin Murphy280b6832017-03-30 17:56:29 +0100805
Will Deacon518f7132014-11-14 17:17:54 +0000806 pgtbl_cfg = (struct io_pgtable_cfg) {
Robin Murphyd5466352016-05-09 17:20:09 +0100807 .pgsize_bitmap = smmu->pgsize_bitmap,
Will Deacon518f7132014-11-14 17:17:54 +0000808 .ias = ias,
809 .oas = oas,
Will Deacon4f418452019-06-25 12:51:25 +0100810 .coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK,
Robin Murphy696bcfb2019-09-18 17:17:51 +0100811 .tlb = smmu_domain->flush_ops,
Robin Murphy2df7a252015-07-29 19:46:06 +0100812 .iommu_dev = smmu->dev,
Will Deacon518f7132014-11-14 17:17:54 +0000813 };
Mitchel Humpherysa18037b2014-07-30 18:58:13 +0100814
Robin Murphy44f68762018-09-20 17:10:27 +0100815 if (smmu_domain->non_strict)
816 pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
817
Will Deacon518f7132014-11-14 17:17:54 +0000818 pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
819 if (!pgtbl_ops) {
820 ret = -ENOMEM;
821 goto out_clear_smmu;
822 }
823
Robin Murphyd5466352016-05-09 17:20:09 +0100824 /* Update the domain's page sizes to reflect the page table format */
825 domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
Robin Murphy455eb7d2016-09-12 17:13:58 +0100826 domain->geometry.aperture_end = (1UL << ias) - 1;
827 domain->geometry.force_aperture = true;
Will Deacon518f7132014-11-14 17:17:54 +0000828
829 /* Initialise the context bank with our page table cfg */
830 arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
Robin Murphy90df3732017-08-08 14:56:14 +0100831 arm_smmu_write_context_bank(smmu, cfg->cbndx);
Will Deacon518f7132014-11-14 17:17:54 +0000832
833 /*
834 * Request context fault interrupt. Do this last to avoid the
835 * handler seeing a half-initialised domain state.
836 */
Will Deacon44680ee2014-06-25 11:29:12 +0100837 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
Peng Fanbee14002016-07-04 17:38:22 +0800838 ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
839 IRQF_SHARED, "arm-smmu-context-fault", domain);
Arnd Bergmann287980e2016-05-27 23:23:25 +0200840 if (ret < 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +0100841 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
Will Deacon44680ee2014-06-25 11:29:12 +0100842 cfg->irptndx, irq);
Will Deaconfba6e962020-01-10 13:20:03 +0000843 cfg->irptndx = ARM_SMMU_INVALID_IRPTNDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100844 }
845
Will Deacon518f7132014-11-14 17:17:54 +0000846 mutex_unlock(&smmu_domain->init_mutex);
847
848 /* Publish page table ops for map/unmap */
849 smmu_domain->pgtbl_ops = pgtbl_ops;
Will Deacona9a1b0b2014-05-01 18:05:08 +0100850 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100851
Will Deacon518f7132014-11-14 17:17:54 +0000852out_clear_smmu:
Liu Xiang6db7bfb2019-09-16 21:53:00 +0800853 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
Will Deacon518f7132014-11-14 17:17:54 +0000854 smmu_domain->smmu = NULL;
Mitchel Humpherysa18037b2014-07-30 18:58:13 +0100855out_unlock:
Will Deacon518f7132014-11-14 17:17:54 +0000856 mutex_unlock(&smmu_domain->init_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +0100857 return ret;
858}
859
860static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
861{
Joerg Roedel1d672632015-03-26 13:43:10 +0100862 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +0100863 struct arm_smmu_device *smmu = smmu_domain->smmu;
864 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Sricharan Rd4a44f02018-12-04 11:52:10 +0530865 int ret, irq;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100866
Will Deacon61bc6712017-01-06 16:56:03 +0000867 if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100868 return;
869
Sricharan Rd4a44f02018-12-04 11:52:10 +0530870 ret = arm_smmu_rpm_get(smmu);
871 if (ret < 0)
872 return;
873
Will Deacon518f7132014-11-14 17:17:54 +0000874 /*
875 * Disable the context bank and free the page tables before freeing
876 * it.
877 */
Robin Murphy90df3732017-08-08 14:56:14 +0100878 smmu->cbs[cfg->cbndx].cfg = NULL;
879 arm_smmu_write_context_bank(smmu, cfg->cbndx);
Will Deacon1463fe42013-07-31 19:21:27 +0100880
Will Deaconfba6e962020-01-10 13:20:03 +0000881 if (cfg->irptndx != ARM_SMMU_INVALID_IRPTNDX) {
Will Deacon44680ee2014-06-25 11:29:12 +0100882 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
Peng Fanbee14002016-07-04 17:38:22 +0800883 devm_free_irq(smmu->dev, irq, domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +0100884 }
885
Markus Elfring44830b02015-11-06 18:32:41 +0100886 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Will Deacon44680ee2014-06-25 11:29:12 +0100887 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
Sricharan Rd4a44f02018-12-04 11:52:10 +0530888
889 arm_smmu_rpm_put(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +0100890}
891
Joerg Roedel1d672632015-03-26 13:43:10 +0100892static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100893{
894 struct arm_smmu_domain *smmu_domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100895
Will Deacon61bc6712017-01-06 16:56:03 +0000896 if (type != IOMMU_DOMAIN_UNMANAGED &&
897 type != IOMMU_DOMAIN_DMA &&
898 type != IOMMU_DOMAIN_IDENTITY)
Joerg Roedel1d672632015-03-26 13:43:10 +0100899 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100900 /*
901 * Allocate the domain and initialise some of its data structures.
902 * We can't really do anything meaningful until we've added a
903 * master.
904 */
905 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
906 if (!smmu_domain)
Joerg Roedel1d672632015-03-26 13:43:10 +0100907 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100908
Robin Murphy021bb842016-09-14 15:26:46 +0100909 if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
910 iommu_get_dma_cookie(&smmu_domain->domain))) {
Robin Murphy9adb9592016-01-26 18:06:36 +0000911 kfree(smmu_domain);
912 return NULL;
913 }
914
Will Deacon518f7132014-11-14 17:17:54 +0000915 mutex_init(&smmu_domain->init_mutex);
Robin Murphy523d7422017-06-22 16:53:56 +0100916 spin_lock_init(&smmu_domain->cb_lock);
Joerg Roedel1d672632015-03-26 13:43:10 +0100917
918 return &smmu_domain->domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100919}
920
Joerg Roedel1d672632015-03-26 13:43:10 +0100921static void arm_smmu_domain_free(struct iommu_domain *domain)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100922{
Joerg Roedel1d672632015-03-26 13:43:10 +0100923 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon1463fe42013-07-31 19:21:27 +0100924
925 /*
926 * Free the domain resources. We assume that all devices have
927 * already been detached.
928 */
Robin Murphy9adb9592016-01-26 18:06:36 +0000929 iommu_put_dma_cookie(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +0100930 arm_smmu_destroy_domain_context(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +0100931 kfree(smmu_domain);
932}
933
Robin Murphy1f3d5ca2016-09-12 17:13:49 +0100934static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
935{
936 struct arm_smmu_smr *smr = smmu->smrs + idx;
Will Deaconfba6e962020-01-10 13:20:03 +0000937 u32 reg = FIELD_PREP(ARM_SMMU_SMR_ID, smr->id) |
938 FIELD_PREP(ARM_SMMU_SMR_MASK, smr->mask);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +0100939
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +0300940 if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
Will Deaconfba6e962020-01-10 13:20:03 +0000941 reg |= ARM_SMMU_SMR_VALID;
Robin Murphy00320ce2019-08-15 19:37:31 +0100942 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(idx), reg);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +0100943}
944
Robin Murphy8e8b2032016-09-12 17:13:50 +0100945static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
946{
947 struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
Will Deaconfba6e962020-01-10 13:20:03 +0000948 u32 reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, s2cr->type) |
949 FIELD_PREP(ARM_SMMU_S2CR_CBNDX, s2cr->cbndx) |
950 FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg);
Robin Murphy8e8b2032016-09-12 17:13:50 +0100951
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +0300952 if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
953 smmu->smrs[idx].valid)
Will Deaconfba6e962020-01-10 13:20:03 +0000954 reg |= ARM_SMMU_S2CR_EXIDVALID;
Robin Murphy00320ce2019-08-15 19:37:31 +0100955 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
Robin Murphy8e8b2032016-09-12 17:13:50 +0100956}
957
958static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
959{
960 arm_smmu_write_s2cr(smmu, idx);
961 if (smmu->smrs)
962 arm_smmu_write_smr(smmu, idx);
963}
964
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +0300965/*
966 * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
967 * should be called after sCR0 is written.
968 */
969static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
970{
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +0300971 u32 smr;
Robin Murphy79f7a5c2020-01-10 15:25:02 +0000972 int i;
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +0300973
974 if (!smmu->smrs)
975 return;
Robin Murphy79f7a5c2020-01-10 15:25:02 +0000976 /*
977 * If we've had to accommodate firmware memory regions, we may
978 * have live SMRs by now; tread carefully...
979 *
980 * Somewhat perversely, not having a free SMR for this test implies we
981 * can get away without it anyway, as we'll only be able to 'allocate'
982 * these SMRs for the ID/mask values we're already trusting to be OK.
983 */
984 for (i = 0; i < smmu->num_mapping_groups; i++)
985 if (!smmu->smrs[i].valid)
986 goto smr_ok;
987 return;
988smr_ok:
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +0300989 /*
990 * SMR.ID bits may not be preserved if the corresponding MASK
991 * bits are set, so check each one separately. We can reject
992 * masters later if they try to claim IDs outside these masks.
993 */
Will Deaconfba6e962020-01-10 13:20:03 +0000994 smr = FIELD_PREP(ARM_SMMU_SMR_ID, smmu->streamid_mask);
Robin Murphy79f7a5c2020-01-10 15:25:02 +0000995 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(i), smr);
996 smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
Will Deaconfba6e962020-01-10 13:20:03 +0000997 smmu->streamid_mask = FIELD_GET(ARM_SMMU_SMR_ID, smr);
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +0300998
Will Deaconfba6e962020-01-10 13:20:03 +0000999 smr = FIELD_PREP(ARM_SMMU_SMR_MASK, smmu->streamid_mask);
Robin Murphy79f7a5c2020-01-10 15:25:02 +00001000 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(i), smr);
1001 smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
Will Deaconfba6e962020-01-10 13:20:03 +00001002 smmu->smr_mask_mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001003}
1004
Robin Murphy588888a2016-09-12 17:13:54 +01001005static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001006{
1007 struct arm_smmu_smr *smrs = smmu->smrs;
Robin Murphy588888a2016-09-12 17:13:54 +01001008 int i, free_idx = -ENOSPC;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001009
Robin Murphy588888a2016-09-12 17:13:54 +01001010 /* Stream indexing is blissfully easy */
1011 if (!smrs)
1012 return id;
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001013
Robin Murphy588888a2016-09-12 17:13:54 +01001014 /* Validating SMRs is... less so */
1015 for (i = 0; i < smmu->num_mapping_groups; ++i) {
1016 if (!smrs[i].valid) {
1017 /*
1018 * Note the first free entry we come across, which
1019 * we'll claim in the end if nothing else matches.
1020 */
1021 if (free_idx < 0)
1022 free_idx = i;
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001023 continue;
1024 }
Robin Murphy588888a2016-09-12 17:13:54 +01001025 /*
1026 * If the new entry is _entirely_ matched by an existing entry,
1027 * then reuse that, with the guarantee that there also cannot
1028 * be any subsequent conflicting entries. In normal use we'd
1029 * expect simply identical entries for this case, but there's
1030 * no harm in accommodating the generalisation.
1031 */
1032 if ((mask & smrs[i].mask) == mask &&
1033 !((id ^ smrs[i].id) & ~smrs[i].mask))
1034 return i;
1035 /*
1036 * If the new entry has any other overlap with an existing one,
1037 * though, then there always exists at least one stream ID
1038 * which would cause a conflict, and we can't allow that risk.
1039 */
1040 if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
1041 return -EINVAL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001042 }
1043
Robin Murphy588888a2016-09-12 17:13:54 +01001044 return free_idx;
1045}
1046
1047static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
1048{
1049 if (--smmu->s2crs[idx].count)
1050 return false;
1051
1052 smmu->s2crs[idx] = s2cr_init_val;
1053 if (smmu->smrs)
1054 smmu->smrs[idx].valid = false;
1055
1056 return true;
1057}
1058
1059static int arm_smmu_master_alloc_smes(struct device *dev)
1060{
Joerg Roedel9b468f72018-11-29 14:01:00 +01001061 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Joerg Roedelc84500a2020-03-26 16:08:36 +01001062 struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
Robin Murphy588888a2016-09-12 17:13:54 +01001063 struct arm_smmu_device *smmu = cfg->smmu;
1064 struct arm_smmu_smr *smrs = smmu->smrs;
Robin Murphy588888a2016-09-12 17:13:54 +01001065 int i, idx, ret;
1066
1067 mutex_lock(&smmu->stream_map_mutex);
1068 /* Figure out a viable stream map entry allocation */
Robin Murphy24651702020-03-26 16:08:35 +01001069 for_each_cfg_sme(cfg, fwspec, i, idx) {
Will Deaconfba6e962020-01-10 13:20:03 +00001070 u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);
1071 u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]);
Robin Murphy021bb842016-09-14 15:26:46 +01001072
Robin Murphy588888a2016-09-12 17:13:54 +01001073 if (idx != INVALID_SMENDX) {
1074 ret = -EEXIST;
1075 goto out_err;
1076 }
1077
Robin Murphy021bb842016-09-14 15:26:46 +01001078 ret = arm_smmu_find_sme(smmu, sid, mask);
Robin Murphy588888a2016-09-12 17:13:54 +01001079 if (ret < 0)
1080 goto out_err;
1081
1082 idx = ret;
1083 if (smrs && smmu->s2crs[idx].count == 0) {
Robin Murphy021bb842016-09-14 15:26:46 +01001084 smrs[idx].id = sid;
1085 smrs[idx].mask = mask;
Robin Murphy588888a2016-09-12 17:13:54 +01001086 smrs[idx].valid = true;
1087 }
1088 smmu->s2crs[idx].count++;
1089 cfg->smendx[i] = (s16)idx;
1090 }
1091
Will Deacon45ae7cf2013-06-24 18:31:25 +01001092 /* It worked! Now, poke the actual hardware */
Joerg Roedelcefa0d52020-04-29 15:36:55 +02001093 for_each_cfg_sme(cfg, fwspec, i, idx)
Robin Murphy588888a2016-09-12 17:13:54 +01001094 arm_smmu_write_sme(smmu, idx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001095
Robin Murphy588888a2016-09-12 17:13:54 +01001096 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001097 return 0;
1098
Robin Murphy588888a2016-09-12 17:13:54 +01001099out_err:
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001100 while (i--) {
Robin Murphy588888a2016-09-12 17:13:54 +01001101 arm_smmu_free_sme(smmu, cfg->smendx[i]);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001102 cfg->smendx[i] = INVALID_SMENDX;
1103 }
Robin Murphy588888a2016-09-12 17:13:54 +01001104 mutex_unlock(&smmu->stream_map_mutex);
1105 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001106}
1107
Robin Murphy24651702020-03-26 16:08:35 +01001108static void arm_smmu_master_free_smes(struct arm_smmu_master_cfg *cfg,
1109 struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001110{
Robin Murphy24651702020-03-26 16:08:35 +01001111 struct arm_smmu_device *smmu = cfg->smmu;
Robin Murphyd3097e32016-09-12 17:13:53 +01001112 int i, idx;
Will Deacon43b412b2014-07-15 11:22:24 +01001113
Robin Murphy588888a2016-09-12 17:13:54 +01001114 mutex_lock(&smmu->stream_map_mutex);
Robin Murphy24651702020-03-26 16:08:35 +01001115 for_each_cfg_sme(cfg, fwspec, i, idx) {
Robin Murphy588888a2016-09-12 17:13:54 +01001116 if (arm_smmu_free_sme(smmu, idx))
1117 arm_smmu_write_sme(smmu, idx);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001118 cfg->smendx[i] = INVALID_SMENDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001119 }
Robin Murphy588888a2016-09-12 17:13:54 +01001120 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001121}
1122
Will Deacon45ae7cf2013-06-24 18:31:25 +01001123static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Robin Murphy24651702020-03-26 16:08:35 +01001124 struct arm_smmu_master_cfg *cfg,
Robin Murphyadfec2e2016-09-12 17:13:55 +01001125 struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001126{
Will Deacon44680ee2014-06-25 11:29:12 +01001127 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001128 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001129 u8 cbndx = smmu_domain->cfg.cbndx;
Will Deacon61bc6712017-01-06 16:56:03 +00001130 enum arm_smmu_s2cr_type type;
Robin Murphy588888a2016-09-12 17:13:54 +01001131 int i, idx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001132
Will Deacon61bc6712017-01-06 16:56:03 +00001133 if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
1134 type = S2CR_TYPE_BYPASS;
1135 else
1136 type = S2CR_TYPE_TRANS;
1137
Robin Murphy24651702020-03-26 16:08:35 +01001138 for_each_cfg_sme(cfg, fwspec, i, idx) {
Robin Murphy8e8b2032016-09-12 17:13:50 +01001139 if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
Robin Murphy588888a2016-09-12 17:13:54 +01001140 continue;
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001141
Robin Murphy8e8b2032016-09-12 17:13:50 +01001142 s2cr[idx].type = type;
Sricharan Re1989802017-01-06 18:58:15 +05301143 s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001144 s2cr[idx].cbndx = cbndx;
1145 arm_smmu_write_s2cr(smmu, idx);
Will Deacon43b412b2014-07-15 11:22:24 +01001146 }
Robin Murphy8e8b2032016-09-12 17:13:50 +01001147 return 0;
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001148}
1149
Will Deacon45ae7cf2013-06-24 18:31:25 +01001150static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1151{
Joerg Roedel1d672632015-03-26 13:43:10 +01001152 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Joerg Roedelc84500a2020-03-26 16:08:36 +01001153 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphy24651702020-03-26 16:08:35 +01001154 struct arm_smmu_master_cfg *cfg;
1155 struct arm_smmu_device *smmu;
Joerg Roedelc84500a2020-03-26 16:08:36 +01001156 int ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001157
Robin Murphyadfec2e2016-09-12 17:13:55 +01001158 if (!fwspec || fwspec->ops != &arm_smmu_ops) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001159 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
1160 return -ENXIO;
1161 }
1162
Robin Murphyfba4f8e2016-10-17 12:06:21 +01001163 /*
1164 * FIXME: The arch/arm DMA API code tries to attach devices to its own
Joerg Roedelcefa0d52020-04-29 15:36:55 +02001165 * domains between of_xlate() and probe_device() - we have no way to cope
Robin Murphyfba4f8e2016-10-17 12:06:21 +01001166 * with that, so until ARM gets converted to rely on groups and default
1167 * domains, just say no (but more politely than by dereferencing NULL).
1168 * This should be at least a WARN_ON once that's sorted.
1169 */
Joerg Roedelc84500a2020-03-26 16:08:36 +01001170 cfg = dev_iommu_priv_get(dev);
Robin Murphy24651702020-03-26 16:08:35 +01001171 if (!cfg)
Robin Murphyfba4f8e2016-10-17 12:06:21 +01001172 return -ENODEV;
1173
Robin Murphy24651702020-03-26 16:08:35 +01001174 smmu = cfg->smmu;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301175
1176 ret = arm_smmu_rpm_get(smmu);
1177 if (ret < 0)
1178 return ret;
1179
Will Deacon518f7132014-11-14 17:17:54 +00001180 /* Ensure that the domain is finalised */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001181 ret = arm_smmu_init_domain_context(domain, smmu);
Arnd Bergmann287980e2016-05-27 23:23:25 +02001182 if (ret < 0)
Sricharan Rd4a44f02018-12-04 11:52:10 +05301183 goto rpm_put;
Will Deacon518f7132014-11-14 17:17:54 +00001184
Will Deacon45ae7cf2013-06-24 18:31:25 +01001185 /*
Will Deacon44680ee2014-06-25 11:29:12 +01001186 * Sanity check the domain. We don't support domains across
1187 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01001188 */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001189 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001190 dev_err(dev,
1191 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Robin Murphyadfec2e2016-09-12 17:13:55 +01001192 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
Sricharan Rd4a44f02018-12-04 11:52:10 +05301193 ret = -EINVAL;
1194 goto rpm_put;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001195 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001196
1197 /* Looks ok, so add the device to the domain */
Robin Murphy24651702020-03-26 16:08:35 +01001198 ret = arm_smmu_domain_add_master(smmu_domain, cfg, fwspec);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301199
Rob Clarkee9bdfe2019-10-31 14:31:02 -07001200 /*
1201 * Setup an autosuspend delay to avoid bouncing runpm state.
1202 * Otherwise, if a driver for a suspended consumer device
1203 * unmaps buffers, it will runpm resume/suspend for each one.
1204 *
1205 * For example, when used by a GPU device, when an application
1206 * or game exits, it can trigger unmapping 100s or 1000s of
1207 * buffers. With a runpm cycle for each buffer, that adds up
1208 * to 5-10sec worth of reprogramming the context bank, while
1209 * the system appears to be locked up to the user.
1210 */
1211 pm_runtime_set_autosuspend_delay(smmu->dev, 20);
1212 pm_runtime_use_autosuspend(smmu->dev);
1213
Sricharan Rd4a44f02018-12-04 11:52:10 +05301214rpm_put:
1215 arm_smmu_rpm_put(smmu);
1216 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001217}
1218
Will Deacon45ae7cf2013-06-24 18:31:25 +01001219static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Tom Murphy781ca2d2019-09-08 09:56:38 -07001220 phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001221{
Robin Murphy523d7422017-06-22 16:53:56 +01001222 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301223 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1224 int ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001225
Will Deacon518f7132014-11-14 17:17:54 +00001226 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001227 return -ENODEV;
1228
Sricharan Rd4a44f02018-12-04 11:52:10 +05301229 arm_smmu_rpm_get(smmu);
1230 ret = ops->map(ops, iova, paddr, size, prot);
1231 arm_smmu_rpm_put(smmu);
1232
1233 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001234}
1235
1236static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
Will Deacon56f8af52019-07-02 16:44:06 +01001237 size_t size, struct iommu_iotlb_gather *gather)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001238{
Robin Murphy523d7422017-06-22 16:53:56 +01001239 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301240 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1241 size_t ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001242
Will Deacon518f7132014-11-14 17:17:54 +00001243 if (!ops)
1244 return 0;
1245
Sricharan Rd4a44f02018-12-04 11:52:10 +05301246 arm_smmu_rpm_get(smmu);
Will Deacona2d3a382019-07-02 16:44:58 +01001247 ret = ops->unmap(ops, iova, size, gather);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301248 arm_smmu_rpm_put(smmu);
1249
1250 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001251}
1252
Robin Murphy44f68762018-09-20 17:10:27 +01001253static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
1254{
1255 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301256 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy44f68762018-09-20 17:10:27 +01001257
Will Deaconabfd6fe2019-07-02 16:44:41 +01001258 if (smmu_domain->flush_ops) {
Sricharan Rd4a44f02018-12-04 11:52:10 +05301259 arm_smmu_rpm_get(smmu);
Robin Murphy696bcfb2019-09-18 17:17:51 +01001260 smmu_domain->flush_ops->tlb_flush_all(smmu_domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301261 arm_smmu_rpm_put(smmu);
1262 }
Robin Murphy44f68762018-09-20 17:10:27 +01001263}
1264
Will Deacon56f8af52019-07-02 16:44:06 +01001265static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
1266 struct iommu_iotlb_gather *gather)
Robin Murphy32b12442017-09-28 15:55:01 +01001267{
1268 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301269 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy32b12442017-09-28 15:55:01 +01001270
Robin Murphyae2b60f2019-09-18 17:17:50 +01001271 if (!smmu)
1272 return;
1273
1274 arm_smmu_rpm_get(smmu);
1275 if (smmu->version == ARM_SMMU_V2 ||
1276 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
1277 arm_smmu_tlb_sync_context(smmu_domain);
1278 else
1279 arm_smmu_tlb_sync_global(smmu);
1280 arm_smmu_rpm_put(smmu);
Robin Murphy32b12442017-09-28 15:55:01 +01001281}
1282
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001283static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
1284 dma_addr_t iova)
1285{
Joerg Roedel1d672632015-03-26 13:43:10 +01001286 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001287 struct arm_smmu_device *smmu = smmu_domain->smmu;
1288 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1289 struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
1290 struct device *dev = smmu->dev;
Robin Murphy19713fd2019-08-15 19:37:30 +01001291 void __iomem *reg;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001292 u32 tmp;
1293 u64 phys;
Robin Murphy523d7422017-06-22 16:53:56 +01001294 unsigned long va, flags;
Robin Murphy19713fd2019-08-15 19:37:30 +01001295 int ret, idx = cfg->cbndx;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301296
1297 ret = arm_smmu_rpm_get(smmu);
1298 if (ret < 0)
1299 return 0;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001300
Robin Murphy523d7422017-06-22 16:53:56 +01001301 spin_lock_irqsave(&smmu_domain->cb_lock, flags);
Robin Murphy661d9622015-05-27 17:09:34 +01001302 va = iova & ~0xfffUL;
Robin Murphy61005762019-08-15 19:37:28 +01001303 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
Robin Murphy19713fd2019-08-15 19:37:30 +01001304 arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
Robin Murphy61005762019-08-15 19:37:28 +01001305 else
Robin Murphy19713fd2019-08-15 19:37:30 +01001306 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001307
Robin Murphy19713fd2019-08-15 19:37:30 +01001308 reg = arm_smmu_page(smmu, ARM_SMMU_CB(smmu, idx)) + ARM_SMMU_CB_ATSR;
Will Deaconfba6e962020-01-10 13:20:03 +00001309 if (readl_poll_timeout_atomic(reg, tmp, !(tmp & ARM_SMMU_ATSR_ACTIVE),
1310 5, 50)) {
Robin Murphy523d7422017-06-22 16:53:56 +01001311 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001312 dev_err(dev,
Fabio Estevam077124c2015-08-18 17:12:24 +01001313 "iova to phys timed out on %pad. Falling back to software table walk.\n",
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001314 &iova);
1315 return ops->iova_to_phys(ops, iova);
1316 }
1317
Robin Murphy19713fd2019-08-15 19:37:30 +01001318 phys = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_PAR);
Robin Murphy523d7422017-06-22 16:53:56 +01001319 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
Will Deaconfba6e962020-01-10 13:20:03 +00001320 if (phys & ARM_SMMU_CB_PAR_F) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001321 dev_err(dev, "translation fault!\n");
1322 dev_err(dev, "PAR = 0x%llx\n", phys);
1323 return 0;
1324 }
1325
Sricharan Rd4a44f02018-12-04 11:52:10 +05301326 arm_smmu_rpm_put(smmu);
1327
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001328 return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
1329}
1330
Will Deacon45ae7cf2013-06-24 18:31:25 +01001331static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001332 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001333{
Joerg Roedel1d672632015-03-26 13:43:10 +01001334 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Robin Murphy523d7422017-06-22 16:53:56 +01001335 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001336
Sunil Gouthambdf95922017-04-25 15:27:52 +05301337 if (domain->type == IOMMU_DOMAIN_IDENTITY)
1338 return iova;
1339
Will Deacon518f7132014-11-14 17:17:54 +00001340 if (!ops)
Will Deacona44a97912013-11-07 18:47:50 +00001341 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001342
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001343 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
Robin Murphy523d7422017-06-22 16:53:56 +01001344 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
1345 return arm_smmu_iova_to_phys_hard(domain, iova);
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001346
Robin Murphy523d7422017-06-22 16:53:56 +01001347 return ops->iova_to_phys(ops, iova);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001348}
1349
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001350static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001351{
Will Deacond0948942014-06-24 17:30:10 +01001352 switch (cap) {
1353 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001354 /*
1355 * Return true here as the SMMU can always send out coherent
1356 * requests.
1357 */
1358 return true;
Antonios Motakis0029a8d2014-10-13 14:06:18 +01001359 case IOMMU_CAP_NOEXEC:
1360 return true;
Will Deacond0948942014-06-24 17:30:10 +01001361 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001362 return false;
Will Deacond0948942014-06-24 17:30:10 +01001363 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001364}
Will Deacon45ae7cf2013-06-24 18:31:25 +01001365
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001366static
1367struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
Robin Murphy021bb842016-09-14 15:26:46 +01001368{
Suzuki K Poulose67843bb2019-07-23 23:18:34 +01001369 struct device *dev = driver_find_device_by_fwnode(&arm_smmu_driver.driver,
1370 fwnode);
Robin Murphy021bb842016-09-14 15:26:46 +01001371 put_device(dev);
1372 return dev ? dev_get_drvdata(dev) : NULL;
1373}
1374
Joerg Roedelcefa0d52020-04-29 15:36:55 +02001375static struct iommu_device *arm_smmu_probe_device(struct device *dev)
Will Deacon03edb222015-01-19 14:27:33 +00001376{
Joerg Roedel0b242eb2020-03-26 16:08:32 +01001377 struct arm_smmu_device *smmu = NULL;
Robin Murphyf80cd882016-09-14 15:21:39 +01001378 struct arm_smmu_master_cfg *cfg;
Joerg Roedel9b468f72018-11-29 14:01:00 +01001379 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphyf80cd882016-09-14 15:21:39 +01001380 int i, ret;
1381
Robin Murphy021bb842016-09-14 15:26:46 +01001382 if (using_legacy_binding) {
1383 ret = arm_smmu_register_legacy_master(dev, &smmu);
Artem Savkova7990c62017-08-08 12:26:02 +02001384
1385 /*
1386 * If dev->iommu_fwspec is initally NULL, arm_smmu_register_legacy_master()
1387 * will allocate/initialise a new one. Thus we need to update fwspec for
1388 * later use.
1389 */
Joerg Roedel9b468f72018-11-29 14:01:00 +01001390 fwspec = dev_iommu_fwspec_get(dev);
Robin Murphy021bb842016-09-14 15:26:46 +01001391 if (ret)
1392 goto out_free;
Robin Murphy3c117b52016-11-02 17:31:32 +00001393 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001394 smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
Robin Murphy021bb842016-09-14 15:26:46 +01001395 } else {
Joerg Roedelcefa0d52020-04-29 15:36:55 +02001396 return ERR_PTR(-ENODEV);
Robin Murphy021bb842016-09-14 15:26:46 +01001397 }
Robin Murphyf80cd882016-09-14 15:21:39 +01001398
1399 ret = -EINVAL;
Robin Murphyadfec2e2016-09-12 17:13:55 +01001400 for (i = 0; i < fwspec->num_ids; i++) {
Will Deaconfba6e962020-01-10 13:20:03 +00001401 u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);
1402 u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]);
Robin Murphyf80cd882016-09-14 15:21:39 +01001403
Robin Murphyadfec2e2016-09-12 17:13:55 +01001404 if (sid & ~smmu->streamid_mask) {
Robin Murphyf80cd882016-09-14 15:21:39 +01001405 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
Robin Murphy021bb842016-09-14 15:26:46 +01001406 sid, smmu->streamid_mask);
1407 goto out_free;
1408 }
1409 if (mask & ~smmu->smr_mask_mask) {
1410 dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
Peng Fan6323f472017-04-21 17:03:36 +08001411 mask, smmu->smr_mask_mask);
Robin Murphyf80cd882016-09-14 15:21:39 +01001412 goto out_free;
1413 }
Robin Murphyf80cd882016-09-14 15:21:39 +01001414 }
Will Deacon03edb222015-01-19 14:27:33 +00001415
Robin Murphyadfec2e2016-09-12 17:13:55 +01001416 ret = -ENOMEM;
1417 cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
1418 GFP_KERNEL);
1419 if (!cfg)
1420 goto out_free;
1421
1422 cfg->smmu = smmu;
Joerg Roedelc84500a2020-03-26 16:08:36 +01001423 dev_iommu_priv_set(dev, cfg);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001424 while (i--)
1425 cfg->smendx[i] = INVALID_SMENDX;
1426
Sricharan Rd4a44f02018-12-04 11:52:10 +05301427 ret = arm_smmu_rpm_get(smmu);
1428 if (ret < 0)
1429 goto out_cfg_free;
1430
Robin Murphy588888a2016-09-12 17:13:54 +01001431 ret = arm_smmu_master_alloc_smes(dev);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301432 arm_smmu_rpm_put(smmu);
1433
Robin Murphyadfec2e2016-09-12 17:13:55 +01001434 if (ret)
Vivek Gautamc54451a2017-07-06 15:07:00 +05301435 goto out_cfg_free;
Robin Murphyadfec2e2016-09-12 17:13:55 +01001436
Sricharan R655e3642018-12-04 11:52:11 +05301437 device_link_add(dev, smmu->dev,
1438 DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER);
1439
Joerg Roedelcefa0d52020-04-29 15:36:55 +02001440 return &smmu->iommu;
Robin Murphyf80cd882016-09-14 15:21:39 +01001441
Vivek Gautamc54451a2017-07-06 15:07:00 +05301442out_cfg_free:
1443 kfree(cfg);
Robin Murphyf80cd882016-09-14 15:21:39 +01001444out_free:
Robin Murphyadfec2e2016-09-12 17:13:55 +01001445 iommu_fwspec_free(dev);
Joerg Roedelcefa0d52020-04-29 15:36:55 +02001446 return ERR_PTR(ret);
Will Deacon03edb222015-01-19 14:27:33 +00001447}
1448
Joerg Roedelcefa0d52020-04-29 15:36:55 +02001449static void arm_smmu_release_device(struct device *dev)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001450{
Joerg Roedel9b468f72018-11-29 14:01:00 +01001451 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001452 struct arm_smmu_master_cfg *cfg;
1453 struct arm_smmu_device *smmu;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301454 int ret;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001455
Robin Murphyadfec2e2016-09-12 17:13:55 +01001456 if (!fwspec || fwspec->ops != &arm_smmu_ops)
Robin Murphyf80cd882016-09-14 15:21:39 +01001457 return;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001458
Joerg Roedelc84500a2020-03-26 16:08:36 +01001459 cfg = dev_iommu_priv_get(dev);
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001460 smmu = cfg->smmu;
1461
Sricharan Rd4a44f02018-12-04 11:52:10 +05301462 ret = arm_smmu_rpm_get(smmu);
1463 if (ret < 0)
1464 return;
1465
Robin Murphy24651702020-03-26 16:08:35 +01001466 arm_smmu_master_free_smes(cfg, fwspec);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301467
1468 arm_smmu_rpm_put(smmu);
1469
Joerg Roedelc84500a2020-03-26 16:08:36 +01001470 dev_iommu_priv_set(dev, NULL);
Joerg Roedelc84500a2020-03-26 16:08:36 +01001471 kfree(cfg);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001472 iommu_fwspec_free(dev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001473}
1474
Joerg Roedelaf659932015-10-21 23:51:41 +02001475static struct iommu_group *arm_smmu_device_group(struct device *dev)
1476{
Joerg Roedelc84500a2020-03-26 16:08:36 +01001477 struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
Joerg Roedel9b468f72018-11-29 14:01:00 +01001478 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphy24651702020-03-26 16:08:35 +01001479 struct arm_smmu_device *smmu = cfg->smmu;
Robin Murphy588888a2016-09-12 17:13:54 +01001480 struct iommu_group *group = NULL;
1481 int i, idx;
1482
Robin Murphy24651702020-03-26 16:08:35 +01001483 for_each_cfg_sme(cfg, fwspec, i, idx) {
Robin Murphy588888a2016-09-12 17:13:54 +01001484 if (group && smmu->s2crs[idx].group &&
1485 group != smmu->s2crs[idx].group)
1486 return ERR_PTR(-EINVAL);
1487
1488 group = smmu->s2crs[idx].group;
1489 }
1490
1491 if (group)
Robin Murphye1b44cb2016-11-11 17:59:22 +00001492 return iommu_group_ref_get(group);
Joerg Roedelaf659932015-10-21 23:51:41 +02001493
1494 if (dev_is_pci(dev))
1495 group = pci_device_group(dev);
Nipun Guptaeab03e22018-09-10 19:19:18 +05301496 else if (dev_is_fsl_mc(dev))
1497 group = fsl_mc_device_group(dev);
Joerg Roedelaf659932015-10-21 23:51:41 +02001498 else
1499 group = generic_device_group(dev);
1500
Joerg Roedelcefa0d52020-04-29 15:36:55 +02001501 /* Remember group for faster lookups */
1502 if (!IS_ERR(group))
1503 for_each_cfg_sme(cfg, fwspec, i, idx)
1504 smmu->s2crs[idx].group = group;
1505
Joerg Roedelaf659932015-10-21 23:51:41 +02001506 return group;
1507}
1508
Will Deaconc752ce42014-06-25 22:46:31 +01001509static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
1510 enum iommu_attr attr, void *data)
1511{
Joerg Roedel1d672632015-03-26 13:43:10 +01001512 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001513
Robin Murphy44f68762018-09-20 17:10:27 +01001514 switch(domain->type) {
1515 case IOMMU_DOMAIN_UNMANAGED:
1516 switch (attr) {
1517 case DOMAIN_ATTR_NESTING:
1518 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
1519 return 0;
1520 default:
1521 return -ENODEV;
1522 }
1523 break;
1524 case IOMMU_DOMAIN_DMA:
1525 switch (attr) {
1526 case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
1527 *(int *)data = smmu_domain->non_strict;
1528 return 0;
1529 default:
1530 return -ENODEV;
1531 }
1532 break;
Will Deaconc752ce42014-06-25 22:46:31 +01001533 default:
Robin Murphy44f68762018-09-20 17:10:27 +01001534 return -EINVAL;
Will Deaconc752ce42014-06-25 22:46:31 +01001535 }
1536}
1537
1538static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
1539 enum iommu_attr attr, void *data)
1540{
Will Deacon518f7132014-11-14 17:17:54 +00001541 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01001542 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001543
Will Deacon518f7132014-11-14 17:17:54 +00001544 mutex_lock(&smmu_domain->init_mutex);
1545
Robin Murphy44f68762018-09-20 17:10:27 +01001546 switch(domain->type) {
1547 case IOMMU_DOMAIN_UNMANAGED:
1548 switch (attr) {
1549 case DOMAIN_ATTR_NESTING:
1550 if (smmu_domain->smmu) {
1551 ret = -EPERM;
1552 goto out_unlock;
1553 }
1554
1555 if (*(int *)data)
1556 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1557 else
1558 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1559 break;
1560 default:
1561 ret = -ENODEV;
Will Deacon518f7132014-11-14 17:17:54 +00001562 }
Robin Murphy44f68762018-09-20 17:10:27 +01001563 break;
1564 case IOMMU_DOMAIN_DMA:
1565 switch (attr) {
1566 case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
1567 smmu_domain->non_strict = *(int *)data;
1568 break;
1569 default:
1570 ret = -ENODEV;
1571 }
Will Deacon518f7132014-11-14 17:17:54 +00001572 break;
Will Deaconc752ce42014-06-25 22:46:31 +01001573 default:
Robin Murphy44f68762018-09-20 17:10:27 +01001574 ret = -EINVAL;
Will Deaconc752ce42014-06-25 22:46:31 +01001575 }
Will Deacon518f7132014-11-14 17:17:54 +00001576out_unlock:
1577 mutex_unlock(&smmu_domain->init_mutex);
1578 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01001579}
1580
Robin Murphy021bb842016-09-14 15:26:46 +01001581static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
1582{
Robin Murphy56fbf602017-03-31 12:03:33 +01001583 u32 mask, fwid = 0;
Robin Murphy021bb842016-09-14 15:26:46 +01001584
1585 if (args->args_count > 0)
Will Deaconfba6e962020-01-10 13:20:03 +00001586 fwid |= FIELD_PREP(ARM_SMMU_SMR_ID, args->args[0]);
Robin Murphy021bb842016-09-14 15:26:46 +01001587
1588 if (args->args_count > 1)
Will Deaconfba6e962020-01-10 13:20:03 +00001589 fwid |= FIELD_PREP(ARM_SMMU_SMR_MASK, args->args[1]);
Robin Murphy56fbf602017-03-31 12:03:33 +01001590 else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
Will Deaconfba6e962020-01-10 13:20:03 +00001591 fwid |= FIELD_PREP(ARM_SMMU_SMR_MASK, mask);
Robin Murphy021bb842016-09-14 15:26:46 +01001592
1593 return iommu_fwspec_add_ids(dev, &fwid, 1);
1594}
1595
Eric Augerf3ebee82017-01-19 20:57:55 +00001596static void arm_smmu_get_resv_regions(struct device *dev,
1597 struct list_head *head)
1598{
1599 struct iommu_resv_region *region;
1600 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
1601
1602 region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
Robin Murphy9d3a4de2017-03-16 17:00:16 +00001603 prot, IOMMU_RESV_SW_MSI);
Eric Augerf3ebee82017-01-19 20:57:55 +00001604 if (!region)
1605 return;
1606
1607 list_add_tail(&region->list, head);
Robin Murphy273df962017-03-16 17:00:19 +00001608
1609 iommu_dma_get_resv_regions(dev, head);
Eric Augerf3ebee82017-01-19 20:57:55 +00001610}
1611
Sai Prakash Ranjan232c5ae2020-04-21 00:03:50 +05301612static int arm_smmu_def_domain_type(struct device *dev)
1613{
1614 struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
1615 const struct arm_smmu_impl *impl = cfg->smmu->impl;
1616
1617 if (impl && impl->def_domain_type)
1618 return impl->def_domain_type(dev);
1619
1620 return 0;
1621}
1622
Will Deacon518f7132014-11-14 17:17:54 +00001623static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01001624 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01001625 .domain_alloc = arm_smmu_domain_alloc,
1626 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01001627 .attach_dev = arm_smmu_attach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01001628 .map = arm_smmu_map,
1629 .unmap = arm_smmu_unmap,
Robin Murphy44f68762018-09-20 17:10:27 +01001630 .flush_iotlb_all = arm_smmu_flush_iotlb_all,
Robin Murphy32b12442017-09-28 15:55:01 +01001631 .iotlb_sync = arm_smmu_iotlb_sync,
Will Deaconc752ce42014-06-25 22:46:31 +01001632 .iova_to_phys = arm_smmu_iova_to_phys,
Joerg Roedelcefa0d52020-04-29 15:36:55 +02001633 .probe_device = arm_smmu_probe_device,
1634 .release_device = arm_smmu_release_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02001635 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01001636 .domain_get_attr = arm_smmu_domain_get_attr,
1637 .domain_set_attr = arm_smmu_domain_set_attr,
Robin Murphy021bb842016-09-14 15:26:46 +01001638 .of_xlate = arm_smmu_of_xlate,
Eric Augerf3ebee82017-01-19 20:57:55 +00001639 .get_resv_regions = arm_smmu_get_resv_regions,
Thierry Redinga66c5dc2019-12-18 14:42:02 +01001640 .put_resv_regions = generic_iommu_put_resv_regions,
Sai Prakash Ranjan232c5ae2020-04-21 00:03:50 +05301641 .def_domain_type = arm_smmu_def_domain_type,
Will Deacon518f7132014-11-14 17:17:54 +00001642 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001643};
1644
1645static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
1646{
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001647 int i;
Robin Murphy62b993a2019-08-15 19:37:36 +01001648 u32 reg;
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001649
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001650 /* clear global FSR */
Robin Murphy00320ce2019-08-15 19:37:31 +01001651 reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
1652 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, reg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001653
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001654 /*
1655 * Reset stream mapping groups: Initial values mark all SMRn as
1656 * invalid and all S2CRn as bypass unless overridden.
1657 */
Robin Murphy8e8b2032016-09-12 17:13:50 +01001658 for (i = 0; i < smmu->num_mapping_groups; ++i)
1659 arm_smmu_write_sme(smmu, i);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001660
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001661 /* Make sure all context banks are disabled and clear CB_FSR */
1662 for (i = 0; i < smmu->num_context_banks; ++i) {
Robin Murphy90df3732017-08-08 14:56:14 +01001663 arm_smmu_write_context_bank(smmu, i);
Will Deaconfba6e962020-01-10 13:20:03 +00001664 arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_FSR, ARM_SMMU_FSR_FAULT);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001665 }
Will Deacon1463fe42013-07-31 19:21:27 +01001666
Will Deacon45ae7cf2013-06-24 18:31:25 +01001667 /* Invalidate the TLB, just in case */
Robin Murphy00320ce2019-08-15 19:37:31 +01001668 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLH, QCOM_DUMMY_VAL);
1669 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLNSNH, QCOM_DUMMY_VAL);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001670
Robin Murphy00320ce2019-08-15 19:37:31 +01001671 reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001672
Will Deacon45ae7cf2013-06-24 18:31:25 +01001673 /* Enable fault reporting */
Will Deaconfba6e962020-01-10 13:20:03 +00001674 reg |= (ARM_SMMU_sCR0_GFRE | ARM_SMMU_sCR0_GFIE |
1675 ARM_SMMU_sCR0_GCFGFRE | ARM_SMMU_sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001676
1677 /* Disable TLB broadcasting. */
Will Deaconfba6e962020-01-10 13:20:03 +00001678 reg |= (ARM_SMMU_sCR0_VMIDPNE | ARM_SMMU_sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001679
Robin Murphy25a1c962016-02-10 14:25:33 +00001680 /* Enable client access, handling unmatched streams as appropriate */
Will Deaconfba6e962020-01-10 13:20:03 +00001681 reg &= ~ARM_SMMU_sCR0_CLIENTPD;
Robin Murphy25a1c962016-02-10 14:25:33 +00001682 if (disable_bypass)
Will Deaconfba6e962020-01-10 13:20:03 +00001683 reg |= ARM_SMMU_sCR0_USFCFG;
Robin Murphy25a1c962016-02-10 14:25:33 +00001684 else
Will Deaconfba6e962020-01-10 13:20:03 +00001685 reg &= ~ARM_SMMU_sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001686
1687 /* Disable forced broadcasting */
Will Deaconfba6e962020-01-10 13:20:03 +00001688 reg &= ~ARM_SMMU_sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001689
1690 /* Don't upgrade barriers */
Will Deaconfba6e962020-01-10 13:20:03 +00001691 reg &= ~(ARM_SMMU_sCR0_BSU);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001692
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001693 if (smmu->features & ARM_SMMU_FEAT_VMID16)
Will Deaconfba6e962020-01-10 13:20:03 +00001694 reg |= ARM_SMMU_sCR0_VMID16EN;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001695
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001696 if (smmu->features & ARM_SMMU_FEAT_EXIDS)
Will Deaconfba6e962020-01-10 13:20:03 +00001697 reg |= ARM_SMMU_sCR0_EXIDENABLE;
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001698
Robin Murphy62b993a2019-08-15 19:37:36 +01001699 if (smmu->impl && smmu->impl->reset)
1700 smmu->impl->reset(smmu);
1701
Will Deacon45ae7cf2013-06-24 18:31:25 +01001702 /* Push the button */
Robin Murphy11febfc2017-03-30 17:56:31 +01001703 arm_smmu_tlb_sync_global(smmu);
Robin Murphy00320ce2019-08-15 19:37:31 +01001704 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, reg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001705}
1706
1707static int arm_smmu_id_size_to_bits(int size)
1708{
1709 switch (size) {
1710 case 0:
1711 return 32;
1712 case 1:
1713 return 36;
1714 case 2:
1715 return 40;
1716 case 3:
1717 return 42;
1718 case 4:
1719 return 44;
1720 case 5:
1721 default:
1722 return 48;
1723 }
1724}
1725
1726static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1727{
Robin Murphy490325e2019-08-15 19:37:26 +01001728 unsigned int size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001729 u32 id;
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001730 bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001731 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001732
1733 dev_notice(smmu->dev, "probing hardware configuration...\n");
Robin Murphyb7862e32016-04-13 18:13:03 +01001734 dev_notice(smmu->dev, "SMMUv%d with:\n",
1735 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001736
1737 /* ID0 */
Robin Murphy00320ce2019-08-15 19:37:31 +01001738 id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01001739
1740 /* Restrict available stages based on module parameter */
1741 if (force_stage == 1)
Will Deaconfba6e962020-01-10 13:20:03 +00001742 id &= ~(ARM_SMMU_ID0_S2TS | ARM_SMMU_ID0_NTS);
Will Deacon4cf740b2014-07-14 19:47:39 +01001743 else if (force_stage == 2)
Will Deaconfba6e962020-01-10 13:20:03 +00001744 id &= ~(ARM_SMMU_ID0_S1TS | ARM_SMMU_ID0_NTS);
Will Deacon4cf740b2014-07-14 19:47:39 +01001745
Will Deaconfba6e962020-01-10 13:20:03 +00001746 if (id & ARM_SMMU_ID0_S1TS) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001747 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
1748 dev_notice(smmu->dev, "\tstage 1 translation\n");
1749 }
1750
Will Deaconfba6e962020-01-10 13:20:03 +00001751 if (id & ARM_SMMU_ID0_S2TS) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001752 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
1753 dev_notice(smmu->dev, "\tstage 2 translation\n");
1754 }
1755
Will Deaconfba6e962020-01-10 13:20:03 +00001756 if (id & ARM_SMMU_ID0_NTS) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001757 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
1758 dev_notice(smmu->dev, "\tnested translation\n");
1759 }
1760
1761 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01001762 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001763 dev_err(smmu->dev, "\tno translation support!\n");
1764 return -ENODEV;
1765 }
1766
Will Deaconfba6e962020-01-10 13:20:03 +00001767 if ((id & ARM_SMMU_ID0_S1TS) &&
1768 ((smmu->version < ARM_SMMU_V2) || !(id & ARM_SMMU_ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001769 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
1770 dev_notice(smmu->dev, "\taddress translation ops\n");
1771 }
1772
Robin Murphybae2c2d2015-07-29 19:46:05 +01001773 /*
1774 * In order for DMA API calls to work properly, we must defer to what
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001775 * the FW says about coherency, regardless of what the hardware claims.
Robin Murphybae2c2d2015-07-29 19:46:05 +01001776 * Fortunately, this also opens up a workaround for systems where the
1777 * ID register value has ended up configured incorrectly.
1778 */
Will Deaconfba6e962020-01-10 13:20:03 +00001779 cttw_reg = !!(id & ARM_SMMU_ID0_CTTW);
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001780 if (cttw_fw || cttw_reg)
Robin Murphybae2c2d2015-07-29 19:46:05 +01001781 dev_notice(smmu->dev, "\t%scoherent table walk\n",
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001782 cttw_fw ? "" : "non-");
1783 if (cttw_fw != cttw_reg)
Robin Murphybae2c2d2015-07-29 19:46:05 +01001784 dev_notice(smmu->dev,
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001785 "\t(IDR0.CTTW overridden by FW configuration)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01001786
Robin Murphy21174242016-09-12 17:13:48 +01001787 /* Max. number of entries we have for stream matching/indexing */
Will Deaconfba6e962020-01-10 13:20:03 +00001788 if (smmu->version == ARM_SMMU_V2 && id & ARM_SMMU_ID0_EXIDS) {
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001789 smmu->features |= ARM_SMMU_FEAT_EXIDS;
1790 size = 1 << 16;
1791 } else {
Will Deaconfba6e962020-01-10 13:20:03 +00001792 size = 1 << FIELD_GET(ARM_SMMU_ID0_NUMSIDB, id);
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001793 }
Robin Murphy21174242016-09-12 17:13:48 +01001794 smmu->streamid_mask = size - 1;
Will Deaconfba6e962020-01-10 13:20:03 +00001795 if (id & ARM_SMMU_ID0_SMS) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001796 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
Will Deaconfba6e962020-01-10 13:20:03 +00001797 size = FIELD_GET(ARM_SMMU_ID0_NUMSMRG, id);
Robin Murphy21174242016-09-12 17:13:48 +01001798 if (size == 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001799 dev_err(smmu->dev,
1800 "stream-matching supported, but no SMRs present!\n");
1801 return -ENODEV;
1802 }
1803
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001804 /* Zero-initialised to mark as invalid */
1805 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
1806 GFP_KERNEL);
1807 if (!smmu->smrs)
1808 return -ENOMEM;
1809
Will Deacon45ae7cf2013-06-24 18:31:25 +01001810 dev_notice(smmu->dev,
Robin Murphy490325e2019-08-15 19:37:26 +01001811 "\tstream matching with %u register groups", size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001812 }
Robin Murphy8e8b2032016-09-12 17:13:50 +01001813 /* s2cr->type == 0 means translation, so initialise explicitly */
1814 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
1815 GFP_KERNEL);
1816 if (!smmu->s2crs)
1817 return -ENOMEM;
1818 for (i = 0; i < size; i++)
1819 smmu->s2crs[i] = s2cr_init_val;
1820
Robin Murphy21174242016-09-12 17:13:48 +01001821 smmu->num_mapping_groups = size;
Robin Murphy588888a2016-09-12 17:13:54 +01001822 mutex_init(&smmu->stream_map_mutex);
Will Deacon8e517e72017-07-06 15:55:48 +01001823 spin_lock_init(&smmu->global_sync_lock);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001824
Will Deaconfba6e962020-01-10 13:20:03 +00001825 if (smmu->version < ARM_SMMU_V2 ||
1826 !(id & ARM_SMMU_ID0_PTFS_NO_AARCH32)) {
Robin Murphy7602b872016-04-28 17:12:09 +01001827 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
Will Deaconfba6e962020-01-10 13:20:03 +00001828 if (!(id & ARM_SMMU_ID0_PTFS_NO_AARCH32S))
Robin Murphy7602b872016-04-28 17:12:09 +01001829 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
1830 }
1831
Will Deacon45ae7cf2013-06-24 18:31:25 +01001832 /* ID1 */
Robin Murphy00320ce2019-08-15 19:37:31 +01001833 id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID1);
Will Deaconfba6e962020-01-10 13:20:03 +00001834 smmu->pgshift = (id & ARM_SMMU_ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001835
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01001836 /* Check for size mismatch of SMMU address space from mapped region */
Will Deaconfba6e962020-01-10 13:20:03 +00001837 size = 1 << (FIELD_GET(ARM_SMMU_ID1_NUMPAGENDXB, id) + 1);
Robin Murphy490325e2019-08-15 19:37:26 +01001838 if (smmu->numpage != 2 * size << smmu->pgshift)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001839 dev_warn(smmu->dev,
Robin Murphy490325e2019-08-15 19:37:26 +01001840 "SMMU address space size (0x%x) differs from mapped region size (0x%x)!\n",
1841 2 * size << smmu->pgshift, smmu->numpage);
1842 /* Now properly encode NUMPAGE to subsequently derive SMMU_CB_BASE */
1843 smmu->numpage = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001844
Will Deaconfba6e962020-01-10 13:20:03 +00001845 smmu->num_s2_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMS2CB, id);
1846 smmu->num_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMCB, id);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001847 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
1848 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
1849 return -ENODEV;
1850 }
1851 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
1852 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphy90df3732017-08-08 14:56:14 +01001853 smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks,
1854 sizeof(*smmu->cbs), GFP_KERNEL);
1855 if (!smmu->cbs)
1856 return -ENOMEM;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001857
1858 /* ID2 */
Robin Murphy00320ce2019-08-15 19:37:31 +01001859 id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID2);
Will Deaconfba6e962020-01-10 13:20:03 +00001860 size = arm_smmu_id_size_to_bits(FIELD_GET(ARM_SMMU_ID2_IAS, id));
Will Deacon518f7132014-11-14 17:17:54 +00001861 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001862
Will Deacon518f7132014-11-14 17:17:54 +00001863 /* The output mask is also applied for bypass */
Will Deaconfba6e962020-01-10 13:20:03 +00001864 size = arm_smmu_id_size_to_bits(FIELD_GET(ARM_SMMU_ID2_OAS, id));
Will Deacon518f7132014-11-14 17:17:54 +00001865 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001866
Will Deaconfba6e962020-01-10 13:20:03 +00001867 if (id & ARM_SMMU_ID2_VMID16)
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001868 smmu->features |= ARM_SMMU_FEAT_VMID16;
1869
Robin Murphyf1d84542015-03-04 16:41:05 +00001870 /*
1871 * What the page table walker can address actually depends on which
1872 * descriptor format is in use, but since a) we don't know that yet,
1873 * and b) it can vary per context bank, this will have to do...
1874 */
1875 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
1876 dev_warn(smmu->dev,
1877 "failed to set DMA mask for table walker\n");
1878
Robin Murphyb7862e32016-04-13 18:13:03 +01001879 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00001880 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01001881 if (smmu->version == ARM_SMMU_V1_64K)
1882 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001883 } else {
Will Deaconfba6e962020-01-10 13:20:03 +00001884 size = FIELD_GET(ARM_SMMU_ID2_UBS, id);
Will Deacon518f7132014-11-14 17:17:54 +00001885 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deaconfba6e962020-01-10 13:20:03 +00001886 if (id & ARM_SMMU_ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01001887 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deaconfba6e962020-01-10 13:20:03 +00001888 if (id & ARM_SMMU_ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01001889 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deaconfba6e962020-01-10 13:20:03 +00001890 if (id & ARM_SMMU_ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01001891 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001892 }
1893
Robin Murphy7602b872016-04-28 17:12:09 +01001894 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01001895 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01001896 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01001897 if (smmu->features &
1898 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01001899 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01001900 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01001901 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01001902 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01001903 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01001904
Robin Murphyd5466352016-05-09 17:20:09 +01001905 if (arm_smmu_ops.pgsize_bitmap == -1UL)
1906 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
1907 else
1908 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
1909 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
1910 smmu->pgsize_bitmap);
1911
Will Deacon518f7132014-11-14 17:17:54 +00001912
Will Deacon28d60072014-09-01 16:24:48 +01001913 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
1914 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
Will Deacon518f7132014-11-14 17:17:54 +00001915 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01001916
1917 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
1918 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
Will Deacon518f7132014-11-14 17:17:54 +00001919 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01001920
Robin Murphy3995e182019-08-15 19:37:35 +01001921 if (smmu->impl && smmu->impl->cfg_probe)
1922 return smmu->impl->cfg_probe(smmu);
1923
Will Deacon45ae7cf2013-06-24 18:31:25 +01001924 return 0;
1925}
1926
Robin Murphy67b65a32016-04-13 18:12:57 +01001927struct arm_smmu_match_data {
1928 enum arm_smmu_arch_version version;
1929 enum arm_smmu_implementation model;
1930};
1931
1932#define ARM_SMMU_MATCH_DATA(name, ver, imp) \
Sricharan R96a299d2018-12-04 11:52:09 +05301933static const struct arm_smmu_match_data name = { .version = ver, .model = imp }
Robin Murphy67b65a32016-04-13 18:12:57 +01001934
1935ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
1936ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
Robin Murphyb7862e32016-04-13 18:13:03 +01001937ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01001938ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
Robin Murphye086d912016-04-13 18:12:58 +01001939ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
Vivek Gautam89cddc52018-12-04 11:52:13 +05301940ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2);
Robin Murphy67b65a32016-04-13 18:12:57 +01001941
Joerg Roedel09b52692014-10-02 12:24:45 +02001942static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01001943 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
1944 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
1945 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01001946 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01001947 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01001948 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Vivek Gautam89cddc52018-12-04 11:52:13 +05301949 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
Robin Murphy09360402014-08-28 17:51:59 +01001950 { },
1951};
Will Deaconb06c0762019-12-19 12:03:45 +00001952MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
Robin Murphy09360402014-08-28 17:51:59 +01001953
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00001954#ifdef CONFIG_ACPI
1955static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
1956{
1957 int ret = 0;
1958
1959 switch (model) {
1960 case ACPI_IORT_SMMU_V1:
1961 case ACPI_IORT_SMMU_CORELINK_MMU400:
1962 smmu->version = ARM_SMMU_V1;
1963 smmu->model = GENERIC_SMMU;
1964 break;
Robin Murphy84c24372017-06-19 16:41:56 +01001965 case ACPI_IORT_SMMU_CORELINK_MMU401:
1966 smmu->version = ARM_SMMU_V1_64K;
1967 smmu->model = GENERIC_SMMU;
1968 break;
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00001969 case ACPI_IORT_SMMU_V2:
1970 smmu->version = ARM_SMMU_V2;
1971 smmu->model = GENERIC_SMMU;
1972 break;
1973 case ACPI_IORT_SMMU_CORELINK_MMU500:
1974 smmu->version = ARM_SMMU_V2;
1975 smmu->model = ARM_MMU500;
1976 break;
Robin Murphy84c24372017-06-19 16:41:56 +01001977 case ACPI_IORT_SMMU_CAVIUM_THUNDERX:
1978 smmu->version = ARM_SMMU_V2;
1979 smmu->model = CAVIUM_SMMUV2;
1980 break;
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00001981 default:
1982 ret = -ENODEV;
1983 }
1984
1985 return ret;
1986}
1987
1988static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
1989 struct arm_smmu_device *smmu)
1990{
1991 struct device *dev = smmu->dev;
1992 struct acpi_iort_node *node =
1993 *(struct acpi_iort_node **)dev_get_platdata(dev);
1994 struct acpi_iort_smmu *iort_smmu;
1995 int ret;
1996
1997 /* Retrieve SMMU1/2 specific data */
1998 iort_smmu = (struct acpi_iort_smmu *)node->node_data;
1999
2000 ret = acpi_smmu_get_data(iort_smmu->model, smmu);
2001 if (ret < 0)
2002 return ret;
2003
2004 /* Ignore the configuration access interrupt */
2005 smmu->num_global_irqs = 1;
2006
2007 if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
2008 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
2009
2010 return 0;
2011}
2012#else
2013static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
2014 struct arm_smmu_device *smmu)
2015{
2016 return -ENODEV;
2017}
2018#endif
2019
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002020static int arm_smmu_device_dt_probe(struct platform_device *pdev,
2021 struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002022{
Robin Murphy67b65a32016-04-13 18:12:57 +01002023 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002024 struct device *dev = &pdev->dev;
Robin Murphy021bb842016-09-14 15:26:46 +01002025 bool legacy_binding;
2026
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002027 if (of_property_read_u32(dev->of_node, "#global-interrupts",
2028 &smmu->num_global_irqs)) {
2029 dev_err(dev, "missing #global-interrupts property\n");
2030 return -ENODEV;
2031 }
2032
2033 data = of_device_get_match_data(dev);
2034 smmu->version = data->version;
2035 smmu->model = data->model;
2036
Robin Murphy021bb842016-09-14 15:26:46 +01002037 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
2038 if (legacy_binding && !using_generic_binding) {
Will Deaconcd221bd2019-12-19 12:03:51 +00002039 if (!using_legacy_binding) {
2040 pr_notice("deprecated \"mmu-masters\" DT property in use; %s support unavailable\n",
2041 IS_ENABLED(CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS) ? "DMA API" : "SMMU");
2042 }
Robin Murphy021bb842016-09-14 15:26:46 +01002043 using_legacy_binding = true;
2044 } else if (!legacy_binding && !using_legacy_binding) {
2045 using_generic_binding = true;
2046 } else {
2047 dev_err(dev, "not probing due to mismatched DT properties\n");
2048 return -ENODEV;
2049 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002050
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002051 if (of_dma_is_coherent(dev->of_node))
2052 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
2053
2054 return 0;
2055}
2056
Will Deacon73595722019-12-19 12:03:50 +00002057static int arm_smmu_bus_init(struct iommu_ops *ops)
Robin Murphyf6810c12017-04-10 16:51:05 +05302058{
Will Deacon73595722019-12-19 12:03:50 +00002059 int err;
2060
Robin Murphyf6810c12017-04-10 16:51:05 +05302061 /* Oh, for a proper bus abstraction */
Will Deacon73595722019-12-19 12:03:50 +00002062 if (!iommu_present(&platform_bus_type)) {
2063 err = bus_set_iommu(&platform_bus_type, ops);
2064 if (err)
2065 return err;
2066 }
Robin Murphyf6810c12017-04-10 16:51:05 +05302067#ifdef CONFIG_ARM_AMBA
Will Deacon73595722019-12-19 12:03:50 +00002068 if (!iommu_present(&amba_bustype)) {
2069 err = bus_set_iommu(&amba_bustype, ops);
2070 if (err)
2071 goto err_reset_platform_ops;
2072 }
Robin Murphyf6810c12017-04-10 16:51:05 +05302073#endif
2074#ifdef CONFIG_PCI
2075 if (!iommu_present(&pci_bus_type)) {
Will Deacon73595722019-12-19 12:03:50 +00002076 err = bus_set_iommu(&pci_bus_type, ops);
2077 if (err)
2078 goto err_reset_amba_ops;
Robin Murphyf6810c12017-04-10 16:51:05 +05302079 }
2080#endif
Nipun Guptaeab03e22018-09-10 19:19:18 +05302081#ifdef CONFIG_FSL_MC_BUS
Will Deacon73595722019-12-19 12:03:50 +00002082 if (!iommu_present(&fsl_mc_bus_type)) {
2083 err = bus_set_iommu(&fsl_mc_bus_type, ops);
2084 if (err)
2085 goto err_reset_pci_ops;
2086 }
Nipun Guptaeab03e22018-09-10 19:19:18 +05302087#endif
Will Deacon73595722019-12-19 12:03:50 +00002088 return 0;
2089
2090err_reset_pci_ops: __maybe_unused;
2091#ifdef CONFIG_PCI
2092 bus_set_iommu(&pci_bus_type, NULL);
2093#endif
2094err_reset_amba_ops: __maybe_unused;
2095#ifdef CONFIG_ARM_AMBA
2096 bus_set_iommu(&amba_bustype, NULL);
2097#endif
2098err_reset_platform_ops: __maybe_unused;
2099 bus_set_iommu(&platform_bus_type, NULL);
2100 return err;
Robin Murphyf6810c12017-04-10 16:51:05 +05302101}
2102
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002103static int arm_smmu_device_probe(struct platform_device *pdev)
2104{
2105 struct resource *res;
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002106 resource_size_t ioaddr;
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002107 struct arm_smmu_device *smmu;
2108 struct device *dev = &pdev->dev;
2109 int num_irqs, i, err;
2110
Will Deacon45ae7cf2013-06-24 18:31:25 +01002111 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
2112 if (!smmu) {
2113 dev_err(dev, "failed to allocate arm_smmu_device\n");
2114 return -ENOMEM;
2115 }
2116 smmu->dev = dev;
2117
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002118 if (dev->of_node)
2119 err = arm_smmu_device_dt_probe(pdev, smmu);
2120 else
2121 err = arm_smmu_device_acpi_probe(pdev, smmu);
2122
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002123 if (err)
2124 return err;
Robin Murphy09360402014-08-28 17:51:59 +01002125
Robin Murphyfc058d32019-08-15 19:37:33 +01002126 smmu = arm_smmu_impl_init(smmu);
2127 if (IS_ERR(smmu))
2128 return PTR_ERR(smmu);
2129
Will Deacon45ae7cf2013-06-24 18:31:25 +01002130 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002131 ioaddr = res->start;
Julia Lawall8a7f4312013-08-19 12:20:37 +01002132 smmu->base = devm_ioremap_resource(dev, res);
2133 if (IS_ERR(smmu->base))
2134 return PTR_ERR(smmu->base);
Robin Murphy490325e2019-08-15 19:37:26 +01002135 /*
2136 * The resource size should effectively match the value of SMMU_TOP;
2137 * stash that temporarily until we know PAGESIZE to validate it with.
2138 */
2139 smmu->numpage = resource_size(res);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002140
Will Deacon45ae7cf2013-06-24 18:31:25 +01002141 num_irqs = 0;
2142 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
2143 num_irqs++;
2144 if (num_irqs > smmu->num_global_irqs)
2145 smmu->num_context_irqs++;
2146 }
2147
Andreas Herrmann44a08de2013-10-01 13:39:07 +01002148 if (!smmu->num_context_irqs) {
2149 dev_err(dev, "found %d interrupts but expected at least %d\n",
2150 num_irqs, smmu->num_global_irqs + 1);
2151 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002152 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002153
Kees Cooka86854d2018-06-12 14:07:58 -07002154 smmu->irqs = devm_kcalloc(dev, num_irqs, sizeof(*smmu->irqs),
Will Deacon45ae7cf2013-06-24 18:31:25 +01002155 GFP_KERNEL);
2156 if (!smmu->irqs) {
2157 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
2158 return -ENOMEM;
2159 }
2160
2161 for (i = 0; i < num_irqs; ++i) {
2162 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07002163
Jean-Philippe Brucker34d1b082019-11-11 12:17:21 +01002164 if (irq < 0)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002165 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002166 smmu->irqs[i] = irq;
2167 }
2168
Sricharan R96a299d2018-12-04 11:52:09 +05302169 err = devm_clk_bulk_get_all(dev, &smmu->clks);
2170 if (err < 0) {
2171 dev_err(dev, "failed to get clocks %d\n", err);
2172 return err;
2173 }
2174 smmu->num_clks = err;
2175
2176 err = clk_bulk_prepare_enable(smmu->num_clks, smmu->clks);
2177 if (err)
2178 return err;
2179
Olav Haugan3c8766d2014-08-22 17:12:32 -07002180 err = arm_smmu_device_cfg_probe(smmu);
2181 if (err)
2182 return err;
2183
Vivek Gautamd1e20222018-07-19 23:23:56 +05302184 if (smmu->version == ARM_SMMU_V2) {
2185 if (smmu->num_context_banks > smmu->num_context_irqs) {
2186 dev_err(dev,
2187 "found only %d context irq(s) but %d required\n",
2188 smmu->num_context_irqs, smmu->num_context_banks);
2189 return -ENODEV;
2190 }
2191
2192 /* Ignore superfluous interrupts */
2193 smmu->num_context_irqs = smmu->num_context_banks;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002194 }
2195
Will Deacon45ae7cf2013-06-24 18:31:25 +01002196 for (i = 0; i < smmu->num_global_irqs; ++i) {
Peng Fanbee14002016-07-04 17:38:22 +08002197 err = devm_request_irq(smmu->dev, smmu->irqs[i],
2198 arm_smmu_global_fault,
2199 IRQF_SHARED,
2200 "arm-smmu global fault",
2201 smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002202 if (err) {
2203 dev_err(dev, "failed to request global IRQ %d (%u)\n",
2204 i, smmu->irqs[i]);
Robin Murphyf80cd882016-09-14 15:21:39 +01002205 return err;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002206 }
2207 }
2208
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002209 err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
2210 "smmu.%pa", &ioaddr);
2211 if (err) {
2212 dev_err(dev, "Failed to register iommu in sysfs\n");
2213 return err;
2214 }
2215
2216 iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
2217 iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
2218
2219 err = iommu_device_register(&smmu->iommu);
2220 if (err) {
2221 dev_err(dev, "Failed to register iommu\n");
2222 return err;
2223 }
2224
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002225 platform_set_drvdata(pdev, smmu);
Will Deaconfd90cec2013-08-21 13:56:34 +01002226 arm_smmu_device_reset(smmu);
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03002227 arm_smmu_test_smr_masks(smmu);
Robin Murphy021bb842016-09-14 15:26:46 +01002228
Robin Murphyf6810c12017-04-10 16:51:05 +05302229 /*
Sricharan Rd4a44f02018-12-04 11:52:10 +05302230 * We want to avoid touching dev->power.lock in fastpaths unless
2231 * it's really going to do something useful - pm_runtime_enabled()
2232 * can serve as an ideal proxy for that decision. So, conditionally
2233 * enable pm_runtime.
2234 */
2235 if (dev->pm_domain) {
2236 pm_runtime_set_active(dev);
2237 pm_runtime_enable(dev);
2238 }
2239
2240 /*
Robin Murphyf6810c12017-04-10 16:51:05 +05302241 * For ACPI and generic DT bindings, an SMMU will be probed before
2242 * any device which might need it, so we want the bus ops in place
2243 * ready to handle default domain setup as soon as any SMMU exists.
2244 */
2245 if (!using_legacy_binding)
Will Deacon73595722019-12-19 12:03:50 +00002246 return arm_smmu_bus_init(&arm_smmu_ops);
Robin Murphyf6810c12017-04-10 16:51:05 +05302247
Will Deacon45ae7cf2013-06-24 18:31:25 +01002248 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002249}
2250
Will Deaconb06c0762019-12-19 12:03:45 +00002251static int arm_smmu_device_remove(struct platform_device *pdev)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002252{
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002253 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002254
2255 if (!smmu)
Will Deaconb06c0762019-12-19 12:03:45 +00002256 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002257
Will Deaconecfadb62013-07-31 19:21:28 +01002258 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
Sai Prakash Ranjan02782f32020-04-23 15:25:31 +05302259 dev_notice(&pdev->dev, "disabling translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01002260
Will Deacon73595722019-12-19 12:03:50 +00002261 arm_smmu_bus_init(NULL);
2262 iommu_device_unregister(&smmu->iommu);
2263 iommu_device_sysfs_remove(&smmu->iommu);
2264
Sricharan Rd4a44f02018-12-04 11:52:10 +05302265 arm_smmu_rpm_get(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002266 /* Turn the thing off */
Will Deaconfba6e962020-01-10 13:20:03 +00002267 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, ARM_SMMU_sCR0_CLIENTPD);
Sricharan Rd4a44f02018-12-04 11:52:10 +05302268 arm_smmu_rpm_put(smmu);
Sricharan R96a299d2018-12-04 11:52:09 +05302269
Sricharan Rd4a44f02018-12-04 11:52:10 +05302270 if (pm_runtime_enabled(smmu->dev))
2271 pm_runtime_force_suspend(smmu->dev);
2272 else
2273 clk_bulk_disable(smmu->num_clks, smmu->clks);
2274
2275 clk_bulk_unprepare(smmu->num_clks, smmu->clks);
Will Deaconb06c0762019-12-19 12:03:45 +00002276 return 0;
2277}
2278
2279static void arm_smmu_device_shutdown(struct platform_device *pdev)
2280{
2281 arm_smmu_device_remove(pdev);
Nate Watterson7aa86192017-06-29 18:18:15 -04002282}
2283
Sricharan R96a299d2018-12-04 11:52:09 +05302284static int __maybe_unused arm_smmu_runtime_resume(struct device *dev)
Robin Murphya2d866f2017-08-08 14:56:15 +01002285{
2286 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
Sricharan R96a299d2018-12-04 11:52:09 +05302287 int ret;
2288
2289 ret = clk_bulk_enable(smmu->num_clks, smmu->clks);
2290 if (ret)
2291 return ret;
Robin Murphya2d866f2017-08-08 14:56:15 +01002292
2293 arm_smmu_device_reset(smmu);
Sricharan R96a299d2018-12-04 11:52:09 +05302294
Will Deacon45ae7cf2013-06-24 18:31:25 +01002295 return 0;
2296}
2297
Sricharan R96a299d2018-12-04 11:52:09 +05302298static int __maybe_unused arm_smmu_runtime_suspend(struct device *dev)
Dan Carpenter6614ee72013-08-21 09:34:20 +01002299{
Sricharan R96a299d2018-12-04 11:52:09 +05302300 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
2301
2302 clk_bulk_disable(smmu->num_clks, smmu->clks);
2303
2304 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002305}
2306
Robin Murphya2d866f2017-08-08 14:56:15 +01002307static int __maybe_unused arm_smmu_pm_resume(struct device *dev)
2308{
Sricharan R96a299d2018-12-04 11:52:09 +05302309 if (pm_runtime_suspended(dev))
2310 return 0;
Robin Murphya2d866f2017-08-08 14:56:15 +01002311
Sricharan R96a299d2018-12-04 11:52:09 +05302312 return arm_smmu_runtime_resume(dev);
Robin Murphya2d866f2017-08-08 14:56:15 +01002313}
2314
Sricharan R96a299d2018-12-04 11:52:09 +05302315static int __maybe_unused arm_smmu_pm_suspend(struct device *dev)
2316{
2317 if (pm_runtime_suspended(dev))
2318 return 0;
2319
2320 return arm_smmu_runtime_suspend(dev);
2321}
2322
2323static const struct dev_pm_ops arm_smmu_pm_ops = {
2324 SET_SYSTEM_SLEEP_PM_OPS(arm_smmu_pm_suspend, arm_smmu_pm_resume)
2325 SET_RUNTIME_PM_OPS(arm_smmu_runtime_suspend,
2326 arm_smmu_runtime_resume, NULL)
2327};
Robin Murphya2d866f2017-08-08 14:56:15 +01002328
Will Deacon45ae7cf2013-06-24 18:31:25 +01002329static struct platform_driver arm_smmu_driver = {
2330 .driver = {
Paul Gortmakeraddb672f2018-12-01 14:19:16 -05002331 .name = "arm-smmu",
Masahiro Yamadacd037ff2019-12-24 17:15:00 +09002332 .of_match_table = arm_smmu_of_match,
Paul Gortmakeraddb672f2018-12-01 14:19:16 -05002333 .pm = &arm_smmu_pm_ops,
Will Deacon34debdc2019-12-19 12:03:46 +00002334 .suppress_bind_attrs = true,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002335 },
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002336 .probe = arm_smmu_device_probe,
Will Deaconb06c0762019-12-19 12:03:45 +00002337 .remove = arm_smmu_device_remove,
Nate Watterson7aa86192017-06-29 18:18:15 -04002338 .shutdown = arm_smmu_device_shutdown,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002339};
Will Deaconb06c0762019-12-19 12:03:45 +00002340module_platform_driver(arm_smmu_driver);
2341
2342MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
Will Deacon1ea27ee2019-12-19 12:03:52 +00002343MODULE_AUTHOR("Will Deacon <will@kernel.org>");
Ard Biesheuveld3daf662019-12-19 12:03:48 +00002344MODULE_ALIAS("platform:arm-smmu");
Will Deaconb06c0762019-12-19 12:03:45 +00002345MODULE_LICENSE("GPL v2");