// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 *	- Extended Stream ID (16 bit)
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/ratelimit.h>
#include <linux/slab.h>

#include <linux/amba/bus.h>
#include <linux/fsl/mc.h>

#include "arm-smmu.h"

/*
 * Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU
 * global register space are still, in fact, using a hypervisor to mediate it
 * by trapping and emulating register accesses. Sadly, some deployed versions
 * of said trapping code have bugs wherein they go horribly wrong for stores
 * using r31 (i.e. XZR/WZR) as the source register.
 */
#define QCOM_DUMMY_VAL -1

#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */
#define TLB_SPIN_COUNT			10

#define MSI_IOVA_BASE			0x8000000
#define MSI_IOVA_LENGTH			0x100000

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass =
	IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT);
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

struct arm_smmu_s2cr {
	struct iommu_group		*group;
	int				count;
	enum arm_smmu_s2cr_type		type;
	enum arm_smmu_s2cr_privcfg	privcfg;
	u8				cbndx;
};

#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
}

struct arm_smmu_smr {
	u16				mask;
	u16				id;
	bool				valid;
};

struct arm_smmu_cb {
	u64				ttbr[2];
	u32				tcr[2];
	u32				mair[2];
	struct arm_smmu_cfg		*cfg;
};

struct arm_smmu_master_cfg {
	struct arm_smmu_device		*smmu;
	s16				smendx[];
};
#define INVALID_SMENDX			-1
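/*
 * Per-master data is hung off the iommu_fwspec: iommu_priv points at the
 * arm_smmu_master_cfg, from which these helpers recover the owning SMMU
 * and each stream map entry index registered for the master.
 */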
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
#define fwspec_smendx(fw, i) \
	(i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
#define for_each_cfg_sme(fw, i, idx) \
	for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)

static bool using_legacy_binding, using_generic_binding;

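/*
 * Runtime PM is optional: these wrappers only touch the SMMU's power state
 * when runtime PM has actually been enabled for its device.
 */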
static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
{
	if (pm_runtime_enabled(smmu->dev))
		return pm_runtime_get_sync(smmu->dev);

	return 0;
}

static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
{
	if (pm_runtime_enabled(smmu->dev))
		pm_runtime_put_autosuspend(smmu->dev);
}

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;

#ifdef CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS
static int arm_smmu_bus_init(struct iommu_ops *ops);

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}

static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", -1)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}

static int arm_smmu_register_legacy_master(struct device *dev,
					   struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err;

	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}

/*
 * With the legacy DT binding in play, we have no guarantees about
 * probe order, but then we're also not doing default domains, so we can
 * delay setting bus ops until we're sure every possible SMMU is ready,
 * and that way ensure that no add_device() calls get missed.
 */
static int arm_smmu_legacy_bus_init(void)
{
	if (using_legacy_binding)
		return arm_smmu_bus_init(&arm_smmu_ops);
	return 0;
}
device_initcall_sync(arm_smmu_legacy_bus_init);
#else
static int arm_smmu_register_legacy_master(struct device *dev,
					   struct arm_smmu_device **smmu)
{
	return -ENODEV;
}
#endif /* CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS */

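/* Atomically claim the first free index in [start, end) of the given bitmap */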
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
				int sync, int status)
{
	unsigned int spin_cnt, delay;
	u32 reg;

	if (smmu->impl && unlikely(smmu->impl->tlb_sync))
		return smmu->impl->tlb_sync(smmu, page, sync, status);

	arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL);
	for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
		for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
			reg = arm_smmu_readl(smmu, page, status);
			if (!(reg & sTLBGSTATUS_GSACTIVE))
				return;
			cpu_relax();
		}
		udelay(delay);
	}
	dev_err_ratelimited(smmu->dev,
			    "TLB sync timed out -- SMMU may be deadlocked\n");
}

static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
{
	unsigned long flags;

	spin_lock_irqsave(&smmu->global_sync_lock, flags);
	__arm_smmu_tlb_sync(smmu, ARM_SMMU_GR0, ARM_SMMU_GR0_sTLBGSYNC,
			    ARM_SMMU_GR0_sTLBGSTATUS);
	spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
}

static void arm_smmu_tlb_sync_context(struct arm_smmu_domain *smmu_domain)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	unsigned long flags;

	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
	__arm_smmu_tlb_sync(smmu, ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx),
			    ARM_SMMU_CB_TLBSYNC, ARM_SMMU_CB_TLBSTATUS);
	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
}

static void arm_smmu_tlb_inv_context_s1(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	/*
	 * The TLBI write may be relaxed, so ensure that PTEs cleared by the
	 * current CPU are visible beforehand.
	 */
	wmb();
	arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx,
			  ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid);
	arm_smmu_tlb_sync_context(smmu_domain);
}

static void arm_smmu_tlb_inv_context_s2(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	/* See above */
	wmb();
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
	arm_smmu_tlb_sync_global(smmu);
}

static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size,
				      size_t granule, void *cookie, int reg)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	int idx = cfg->cbndx;

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
		iova = (iova >> 12) << 12;
		iova |= cfg->asid;
		do {
			arm_smmu_cb_write(smmu, idx, reg, iova);
			iova += granule;
		} while (size -= granule);
	} else {
		iova >>= 12;
		iova |= (u64)cfg->asid << 48;
		do {
			arm_smmu_cb_writeq(smmu, idx, reg, iova);
			iova += granule >> 12;
		} while (size -= granule);
	}
}

static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
				      size_t granule, void *cookie, int reg)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	int idx = smmu_domain->cfg.cbndx;

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	iova >>= 12;
	do {
		if (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64)
			arm_smmu_cb_writeq(smmu, idx, reg, iova);
		else
			arm_smmu_cb_write(smmu, idx, reg, iova);
		iova += granule >> 12;
	} while (size -= granule);
}

static void arm_smmu_tlb_inv_walk_s1(unsigned long iova, size_t size,
				     size_t granule, void *cookie)
{
	arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie,
				  ARM_SMMU_CB_S1_TLBIVA);
	arm_smmu_tlb_sync_context(cookie);
}

static void arm_smmu_tlb_inv_leaf_s1(unsigned long iova, size_t size,
				     size_t granule, void *cookie)
{
	arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie,
				  ARM_SMMU_CB_S1_TLBIVAL);
	arm_smmu_tlb_sync_context(cookie);
}

static void arm_smmu_tlb_add_page_s1(struct iommu_iotlb_gather *gather,
				     unsigned long iova, size_t granule,
				     void *cookie)
{
	arm_smmu_tlb_inv_range_s1(iova, granule, granule, cookie,
				  ARM_SMMU_CB_S1_TLBIVAL);
}

static void arm_smmu_tlb_inv_walk_s2(unsigned long iova, size_t size,
				     size_t granule, void *cookie)
{
	arm_smmu_tlb_inv_range_s2(iova, size, granule, cookie,
				  ARM_SMMU_CB_S2_TLBIIPAS2);
	arm_smmu_tlb_sync_context(cookie);
}

static void arm_smmu_tlb_inv_leaf_s2(unsigned long iova, size_t size,
				     size_t granule, void *cookie)
{
	arm_smmu_tlb_inv_range_s2(iova, size, granule, cookie,
				  ARM_SMMU_CB_S2_TLBIIPAS2L);
	arm_smmu_tlb_sync_context(cookie);
}

static void arm_smmu_tlb_add_page_s2(struct iommu_iotlb_gather *gather,
				     unsigned long iova, size_t granule,
				     void *cookie)
{
	arm_smmu_tlb_inv_range_s2(iova, granule, granule, cookie,
				  ARM_SMMU_CB_S2_TLBIIPAS2L);
}

static void arm_smmu_tlb_inv_any_s2_v1(unsigned long iova, size_t size,
				       size_t granule, void *cookie)
{
	arm_smmu_tlb_inv_context_s2(cookie);
}
/*
 * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
 * almost negligible, but the benefit of getting the first one in as far ahead
 * of the sync as possible is significant, hence we don't just make this a
 * no-op and call arm_smmu_tlb_inv_context_s2() from .iotlb_sync as you might
 * think.
 */
static void arm_smmu_tlb_add_page_s2_v1(struct iommu_iotlb_gather *gather,
					unsigned long iova, size_t granule,
					void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
}

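/*
 * TLB maintenance callbacks handed to io-pgtable: one set per translation
 * regime, with a separate SMMUv1 stage-2 variant that can only invalidate
 * by VMID.
 */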
static const struct iommu_flush_ops arm_smmu_s1_tlb_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s1,
	.tlb_flush_walk	= arm_smmu_tlb_inv_walk_s1,
	.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf_s1,
	.tlb_add_page	= arm_smmu_tlb_add_page_s1,
};

static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
	.tlb_flush_walk	= arm_smmu_tlb_inv_walk_s2,
	.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf_s2,
	.tlb_add_page	= arm_smmu_tlb_add_page_s2,
};

static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
	.tlb_flush_walk	= arm_smmu_tlb_inv_any_s2_v1,
	.tlb_flush_leaf	= arm_smmu_tlb_inv_any_s2_v1,
	.tlb_add_page	= arm_smmu_tlb_add_page_s2_v1,
};

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	u32 fsr, fsynr, cbfrsynra;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	int idx = smmu_domain->cfg.cbndx;

	fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0);
	iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);
	cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));

	dev_err_ratelimited(smmu->dev,
	"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
			    fsr, iova, fsynr, cbfrsynra, idx);

	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	gfsr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
	gfsynr0 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	if (__ratelimit(&rs)) {
		if (IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT) &&
		    (gfsr & sGFSR_USF))
			dev_err(smmu->dev,
				"Blocked unknown Stream ID 0x%hx; boot with \"arm-smmu.disable_bypass=0\" to allow, but this may have security implications\n",
				(u16)gfsynr1);
		else
			dev_err(smmu->dev,
				"Unexpected global fault, this could be serious\n");
		dev_err(smmu->dev,
			"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
			gfsr, gfsynr0, gfsynr1, gfsynr2);
	}

	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, gfsr);
	return IRQ_HANDLED;
}

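/*
 * Stage the TCR/TTBR/MAIR values for a context bank in the shadow
 * arm_smmu_cb; arm_smmu_write_context_bank() pushes them to the hardware.
 */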
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	cb->cfg = cfg;

	/* TCR */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr;
		} else {
			cb->tcr[0] = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
			cb->tcr[1] = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			cb->tcr[1] |= FIELD_PREP(TCR2_SEP, TCR2_SEP_UPSTREAM);
			if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
				cb->tcr[1] |= TCR2_AS;
		}
	} else {
		cb->tcr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	}

	/* TTBRs */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
			cb->ttbr[1] = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
		} else {
			cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
			cb->ttbr[0] |= FIELD_PREP(TTBRn_ASID, cfg->asid);
			cb->ttbr[1] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
			cb->ttbr[1] |= FIELD_PREP(TTBRn_ASID, cfg->asid);
		}
	} else {
		cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr;
			cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr;
		} else {
			cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair;
			cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair >> 32;
		}
	}
}

static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
{
	u32 reg;
	bool stage1;
	struct arm_smmu_cb *cb = &smmu->cbs[idx];
	struct arm_smmu_cfg *cfg = cb->cfg;

	/* Unassigned context banks only need disabling */
	if (!cfg) {
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, 0);
		return;
	}

	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	/* CBA2R */
	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_VA64;
		else
			reg = 0;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= FIELD_PREP(CBA2R_VMID16, cfg->vmid);

		arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBA2R(idx), reg);
	}

	/* CBAR */
	reg = FIELD_PREP(CBAR_TYPE, cfg->cbar);
	if (smmu->version < ARM_SMMU_V2)
		reg |= FIELD_PREP(CBAR_IRPTNDX, cfg->irptndx);

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= FIELD_PREP(CBAR_S1_BPSHCFG, CBAR_S1_BPSHCFG_NSH) |
			FIELD_PREP(CBAR_S1_MEMATTR, CBAR_S1_MEMATTR_WB);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= FIELD_PREP(CBAR_VMID, cfg->vmid);
	}
	arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(idx), reg);

	/*
	 * TCR
	 * We must write this before the TTBRs, since it determines the
	 * access behaviour of some fields (in particular, ASID[15:8]).
	 */
	if (stage1 && smmu->version > ARM_SMMU_V1)
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR2, cb->tcr[1]);
	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR, cb->tcr[0]);

	/* TTBRs */
	if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_CONTEXTIDR, cfg->asid);
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR1, cb->ttbr[1]);
	} else {
		arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
		if (stage1)
			arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR1,
					   cb->ttbr[1]);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR0, cb->mair[0]);
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR1, cb->mair[1]);
	}

	/* SCTLR */
	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= SCTLR_E;

	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
}

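/*
 * Finalise a domain on its first attach: pick a translation stage and
 * context format, claim a context bank and IRQ, and allocate the
 * io-pgtable that will back map/unmap.
 */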
static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
		smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N              S1
	 *     S1             S1+S2            S1
	 *     S1               S2             S2
	 *     S1               S1             S1
	 *     N                N              N
	 *     N              S1+S2            S2
	 *     N                S2             S2
	 *     N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
	    !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
	    (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		} else {
			fmt = ARM_V7S;
			ias = min(ias, 32UL);
			oas = min(oas, 32UL);
		}
		smmu_domain->flush_ops = &arm_smmu_s1_tlb_ops;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		if (smmu->version == ARM_SMMU_V2)
			smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v2;
		else
			smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v1;
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}
	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (ret < 0)
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
		cfg->vmid = cfg->cbndx + 1;
	else
		cfg->asid = cfg->cbndx;

	smmu_domain->smmu = smmu;
	if (smmu->impl && smmu->impl->init_context) {
		ret = smmu->impl->init_context(smmu_domain);
		if (ret)
			goto out_unlock;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.coherent_walk	= smmu->features & ARM_SMMU_FEAT_COHERENT_WALK,
		.tlb		= smmu_domain->flush_ops,
		.iommu_dev	= smmu->dev,
	};

	if (smmu_domain->non_strict)
		pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;

	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1UL << ias) - 1;
	domain->geometry.force_aperture = true;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
			       IRQF_SHARED, "arm-smmu-context-fault", domain);
	if (ret < 0) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	int ret, irq;

	if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
		return;

	ret = arm_smmu_rpm_get(smmu);
	if (ret < 0)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	smmu->cbs[cfg->cbndx].cfg = NULL;
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);

	arm_smmu_rpm_put(smmu);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED &&
	    type != IOMMU_DOMAIN_DMA &&
	    type != IOMMU_DOMAIN_IDENTITY)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
	    iommu_get_dma_cookie(&smmu_domain->domain))) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->cb_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

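/*
 * Helpers to mirror the software stream-mapping state (SMRs and S2CRs)
 * into the corresponding global registers.
 */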
static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_smr *smr = smmu->smrs + idx;
	u32 reg = FIELD_PREP(SMR_ID, smr->id) | FIELD_PREP(SMR_MASK, smr->mask);

	if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
		reg |= SMR_VALID;
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(idx), reg);
}

static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
	u32 reg = FIELD_PREP(S2CR_TYPE, s2cr->type) |
		  FIELD_PREP(S2CR_CBNDX, s2cr->cbndx) |
		  FIELD_PREP(S2CR_PRIVCFG, s2cr->privcfg);

	if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
	    smmu->smrs[idx].valid)
		reg |= S2CR_EXIDVALID;
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
}

static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
{
	arm_smmu_write_s2cr(smmu, idx);
	if (smmu->smrs)
		arm_smmu_write_smr(smmu, idx);
}

/*
 * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
 * should be called after sCR0 is written.
 */
static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
{
	u32 smr;

	if (!smmu->smrs)
		return;

	/*
	 * SMR.ID bits may not be preserved if the corresponding MASK
	 * bits are set, so check each one separately. We can reject
	 * masters later if they try to claim IDs outside these masks.
	 */
	smr = FIELD_PREP(SMR_ID, smmu->streamid_mask);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(0), smr);
	smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(0));
	smmu->streamid_mask = FIELD_GET(SMR_ID, smr);

	smr = FIELD_PREP(SMR_MASK, smmu->streamid_mask);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(0), smr);
	smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(0));
	smmu->smr_mask_mask = FIELD_GET(SMR_MASK, smr);
}

static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
{
	struct arm_smmu_smr *smrs = smmu->smrs;
	int i, free_idx = -ENOSPC;

	/* Stream indexing is blissfully easy */
	if (!smrs)
		return id;

	/* Validating SMRs is... less so */
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		if (!smrs[i].valid) {
			/*
			 * Note the first free entry we come across, which
			 * we'll claim in the end if nothing else matches.
			 */
			if (free_idx < 0)
				free_idx = i;
			continue;
		}
		/*
		 * If the new entry is _entirely_ matched by an existing entry,
		 * then reuse that, with the guarantee that there also cannot
		 * be any subsequent conflicting entries. In normal use we'd
		 * expect simply identical entries for this case, but there's
		 * no harm in accommodating the generalisation.
		 */
		if ((mask & smrs[i].mask) == mask &&
		    !((id ^ smrs[i].id) & ~smrs[i].mask))
			return i;
		/*
		 * If the new entry has any other overlap with an existing one,
		 * though, then there always exists at least one stream ID
		 * which would cause a conflict, and we can't allow that risk.
		 */
		if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
			return -EINVAL;
	}

	return free_idx;
}

static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
{
	if (--smmu->s2crs[idx].count)
		return false;

	smmu->s2crs[idx] = s2cr_init_val;
	if (smmu->smrs)
		smmu->smrs[idx].valid = false;

	return true;
}

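/*
 * Reserve stream map entries for every stream ID in the master's fwspec,
 * then program the hardware once the whole allocation has succeeded.
 */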
static int arm_smmu_master_alloc_smes(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
	struct arm_smmu_device *smmu = cfg->smmu;
	struct arm_smmu_smr *smrs = smmu->smrs;
	struct iommu_group *group;
	int i, idx, ret;

	mutex_lock(&smmu->stream_map_mutex);
	/* Figure out a viable stream map entry allocation */
	for_each_cfg_sme(fwspec, i, idx) {
		u16 sid = FIELD_GET(SMR_ID, fwspec->ids[i]);
		u16 mask = FIELD_GET(SMR_MASK, fwspec->ids[i]);

		if (idx != INVALID_SMENDX) {
			ret = -EEXIST;
			goto out_err;
		}

		ret = arm_smmu_find_sme(smmu, sid, mask);
		if (ret < 0)
			goto out_err;

		idx = ret;
		if (smrs && smmu->s2crs[idx].count == 0) {
			smrs[idx].id = sid;
			smrs[idx].mask = mask;
			smrs[idx].valid = true;
		}
		smmu->s2crs[idx].count++;
		cfg->smendx[i] = (s16)idx;
	}

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto out_err;
	}
	iommu_group_put(group);

	/* It worked! Now, poke the actual hardware */
	for_each_cfg_sme(fwspec, i, idx) {
		arm_smmu_write_sme(smmu, idx);
		smmu->s2crs[idx].group = group;
	}

	mutex_unlock(&smmu->stream_map_mutex);
	return 0;

out_err:
	while (i--) {
		arm_smmu_free_sme(smmu, cfg->smendx[i]);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
	return ret;
}

static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
{
	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
	int i, idx;

	mutex_lock(&smmu->stream_map_mutex);
	for_each_cfg_sme(fwspec, i, idx) {
		if (arm_smmu_free_sme(smmu, idx))
			arm_smmu_write_sme(smmu, idx);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
}

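/*
 * Point each of the master's stream map entries at the domain's context
 * bank (or bypass, for an identity domain).
 */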
Will Deacon45ae7cf2013-06-24 18:31:25 +01001116static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Robin Murphyadfec2e2016-09-12 17:13:55 +01001117 struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001118{
Will Deacon44680ee2014-06-25 11:29:12 +01001119 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001120 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001121 u8 cbndx = smmu_domain->cfg.cbndx;
Will Deacon61bc6712017-01-06 16:56:03 +00001122 enum arm_smmu_s2cr_type type;
Robin Murphy588888a2016-09-12 17:13:54 +01001123 int i, idx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001124
Will Deacon61bc6712017-01-06 16:56:03 +00001125 if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
1126 type = S2CR_TYPE_BYPASS;
1127 else
1128 type = S2CR_TYPE_TRANS;
1129
Robin Murphyadfec2e2016-09-12 17:13:55 +01001130 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy8e8b2032016-09-12 17:13:50 +01001131 if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
Robin Murphy588888a2016-09-12 17:13:54 +01001132 continue;
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001133
Robin Murphy8e8b2032016-09-12 17:13:50 +01001134 s2cr[idx].type = type;
Sricharan Re1989802017-01-06 18:58:15 +05301135 s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001136 s2cr[idx].cbndx = cbndx;
1137 arm_smmu_write_s2cr(smmu, idx);
Will Deacon43b412b2014-07-15 11:22:24 +01001138 }
Robin Murphy8e8b2032016-09-12 17:13:50 +01001139 return 0;
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001140}
1141
Will Deacon45ae7cf2013-06-24 18:31:25 +01001142static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1143{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001144 int ret;
Joerg Roedel9b468f72018-11-29 14:01:00 +01001145 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001146 struct arm_smmu_device *smmu;
Joerg Roedel1d672632015-03-26 13:43:10 +01001147 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001148
Robin Murphyadfec2e2016-09-12 17:13:55 +01001149 if (!fwspec || fwspec->ops != &arm_smmu_ops) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001150 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
1151 return -ENXIO;
1152 }
1153
Robin Murphyfba4f8e2016-10-17 12:06:21 +01001154 /*
1155 * FIXME: The arch/arm DMA API code tries to attach devices to its own
1156 * domains between of_xlate() and add_device() - we have no way to cope
1157 * with that, so until ARM gets converted to rely on groups and default
1158 * domains, just say no (but more politely than by dereferencing NULL).
1159 * This should be at least a WARN_ON once that's sorted.
1160 */
1161 if (!fwspec->iommu_priv)
1162 return -ENODEV;
1163
Robin Murphyadfec2e2016-09-12 17:13:55 +01001164 smmu = fwspec_smmu(fwspec);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301165
1166 ret = arm_smmu_rpm_get(smmu);
1167 if (ret < 0)
1168 return ret;
1169
Will Deacon518f7132014-11-14 17:17:54 +00001170 /* Ensure that the domain is finalised */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001171 ret = arm_smmu_init_domain_context(domain, smmu);
Arnd Bergmann287980e2016-05-27 23:23:25 +02001172 if (ret < 0)
Sricharan Rd4a44f02018-12-04 11:52:10 +05301173 goto rpm_put;
Will Deacon518f7132014-11-14 17:17:54 +00001174
Will Deacon45ae7cf2013-06-24 18:31:25 +01001175 /*
Will Deacon44680ee2014-06-25 11:29:12 +01001176 * Sanity check the domain. We don't support domains across
1177 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01001178 */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001179 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001180 dev_err(dev,
1181 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Robin Murphyadfec2e2016-09-12 17:13:55 +01001182 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
Sricharan Rd4a44f02018-12-04 11:52:10 +05301183 ret = -EINVAL;
1184 goto rpm_put;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001185 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001186
1187 /* Looks ok, so add the device to the domain */
Sricharan Rd4a44f02018-12-04 11:52:10 +05301188 ret = arm_smmu_domain_add_master(smmu_domain, fwspec);
1189
Rob Clarkee9bdfe2019-10-31 14:31:02 -07001190 /*
1191	 * Set up an autosuspend delay to avoid bouncing runpm state.
1192 * Otherwise, if a driver for a suspended consumer device
1193 * unmaps buffers, it will runpm resume/suspend for each one.
1194 *
1195 * For example, when used by a GPU device, when an application
1196 * or game exits, it can trigger unmapping 100s or 1000s of
1197 * buffers. With a runpm cycle for each buffer, that adds up
1198 * to 5-10sec worth of reprogramming the context bank, while
1199 * the system appears to be locked up to the user.
1200 */
1201 pm_runtime_set_autosuspend_delay(smmu->dev, 20);
1202 pm_runtime_use_autosuspend(smmu->dev);
1203
Sricharan Rd4a44f02018-12-04 11:52:10 +05301204rpm_put:
1205 arm_smmu_rpm_put(smmu);
1206 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001207}
1208
Will Deacon45ae7cf2013-06-24 18:31:25 +01001209static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Tom Murphy781ca2d2019-09-08 09:56:38 -07001210 phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001211{
Robin Murphy523d7422017-06-22 16:53:56 +01001212 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301213 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1214 int ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001215
Will Deacon518f7132014-11-14 17:17:54 +00001216 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001217 return -ENODEV;
1218
Sricharan Rd4a44f02018-12-04 11:52:10 +05301219 arm_smmu_rpm_get(smmu);
1220 ret = ops->map(ops, iova, paddr, size, prot);
1221 arm_smmu_rpm_put(smmu);
1222
1223 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001224}
1225
1226static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
Will Deacon56f8af52019-07-02 16:44:06 +01001227 size_t size, struct iommu_iotlb_gather *gather)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001228{
Robin Murphy523d7422017-06-22 16:53:56 +01001229 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301230 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1231 size_t ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001232
Will Deacon518f7132014-11-14 17:17:54 +00001233 if (!ops)
1234 return 0;
1235
Sricharan Rd4a44f02018-12-04 11:52:10 +05301236 arm_smmu_rpm_get(smmu);
Will Deacona2d3a382019-07-02 16:44:58 +01001237 ret = ops->unmap(ops, iova, size, gather);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301238 arm_smmu_rpm_put(smmu);
1239
1240 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001241}
1242
Robin Murphy44f68762018-09-20 17:10:27 +01001243static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
1244{
1245 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301246 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy44f68762018-09-20 17:10:27 +01001247
Will Deaconabfd6fe2019-07-02 16:44:41 +01001248 if (smmu_domain->flush_ops) {
Sricharan Rd4a44f02018-12-04 11:52:10 +05301249 arm_smmu_rpm_get(smmu);
Robin Murphy696bcfb2019-09-18 17:17:51 +01001250 smmu_domain->flush_ops->tlb_flush_all(smmu_domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301251 arm_smmu_rpm_put(smmu);
1252 }
Robin Murphy44f68762018-09-20 17:10:27 +01001253}
1254
Will Deacon56f8af52019-07-02 16:44:06 +01001255static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
1256 struct iommu_iotlb_gather *gather)
Robin Murphy32b12442017-09-28 15:55:01 +01001257{
1258 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301259 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy32b12442017-09-28 15:55:01 +01001260
Robin Murphyae2b60f2019-09-18 17:17:50 +01001261 if (!smmu)
1262 return;
1263
1264 arm_smmu_rpm_get(smmu);
1265 if (smmu->version == ARM_SMMU_V2 ||
1266 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
1267 arm_smmu_tlb_sync_context(smmu_domain);
1268 else
1269 arm_smmu_tlb_sync_global(smmu);
1270 arm_smmu_rpm_put(smmu);
Robin Murphy32b12442017-09-28 15:55:01 +01001271}
1272
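/*
 * Resolve an IOVA using the hardware's own address translation operation:
 * write the address to ATS1PR, poll ATSR until the walk completes, then
 * read the result back from PAR. If the hardware times out we fall back
 * to a software table walk.
 */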
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001273static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
1274 dma_addr_t iova)
1275{
Joerg Roedel1d672632015-03-26 13:43:10 +01001276 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001277 struct arm_smmu_device *smmu = smmu_domain->smmu;
1278 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1279	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1280 struct device *dev = smmu->dev;
Robin Murphy19713fd2019-08-15 19:37:30 +01001281 void __iomem *reg;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001282 u32 tmp;
1283 u64 phys;
Robin Murphy523d7422017-06-22 16:53:56 +01001284 unsigned long va, flags;
Robin Murphy19713fd2019-08-15 19:37:30 +01001285 int ret, idx = cfg->cbndx;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301286
1287 ret = arm_smmu_rpm_get(smmu);
1288 if (ret < 0)
1289 return 0;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001290
Robin Murphy523d7422017-06-22 16:53:56 +01001291 spin_lock_irqsave(&smmu_domain->cb_lock, flags);
Robin Murphy661d9622015-05-27 17:09:34 +01001292 va = iova & ~0xfffUL;
Robin Murphy61005762019-08-15 19:37:28 +01001293 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
Robin Murphy19713fd2019-08-15 19:37:30 +01001294 arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
Robin Murphy61005762019-08-15 19:37:28 +01001295 else
Robin Murphy19713fd2019-08-15 19:37:30 +01001296 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001297
Robin Murphy19713fd2019-08-15 19:37:30 +01001298 reg = arm_smmu_page(smmu, ARM_SMMU_CB(smmu, idx)) + ARM_SMMU_CB_ATSR;
1299 if (readl_poll_timeout_atomic(reg, tmp, !(tmp & ATSR_ACTIVE), 5, 50)) {
Robin Murphy523d7422017-06-22 16:53:56 +01001300 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001301 dev_err(dev,
Fabio Estevam077124c2015-08-18 17:12:24 +01001302 "iova to phys timed out on %pad. Falling back to software table walk.\n",
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001303 &iova);
		arm_smmu_rpm_put(smmu);
1304		return ops->iova_to_phys(ops, iova);
1305 }
1306
Robin Murphy19713fd2019-08-15 19:37:30 +01001307 phys = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_PAR);
Robin Murphy523d7422017-06-22 16:53:56 +01001308 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001309 if (phys & CB_PAR_F) {
1310 dev_err(dev, "translation fault!\n");
1311 dev_err(dev, "PAR = 0x%llx\n", phys);
		arm_smmu_rpm_put(smmu);
1312		return 0;
1313 }
1314
Sricharan Rd4a44f02018-12-04 11:52:10 +05301315 arm_smmu_rpm_put(smmu);
1316
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001317 return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
1318}
1319
Will Deacon45ae7cf2013-06-24 18:31:25 +01001320static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001321 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001322{
Joerg Roedel1d672632015-03-26 13:43:10 +01001323 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Robin Murphy523d7422017-06-22 16:53:56 +01001324 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001325
Sunil Gouthambdf95922017-04-25 15:27:52 +05301326 if (domain->type == IOMMU_DOMAIN_IDENTITY)
1327 return iova;
1328
Will Deacon518f7132014-11-14 17:17:54 +00001329 if (!ops)
Will Deacona44a97912013-11-07 18:47:50 +00001330 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001331
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001332 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
Robin Murphy523d7422017-06-22 16:53:56 +01001333 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
1334 return arm_smmu_iova_to_phys_hard(domain, iova);
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001335
Robin Murphy523d7422017-06-22 16:53:56 +01001336 return ops->iova_to_phys(ops, iova);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001337}
1338
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001339static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001340{
Will Deacond0948942014-06-24 17:30:10 +01001341 switch (cap) {
1342 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001343 /*
1344 * Return true here as the SMMU can always send out coherent
1345 * requests.
1346 */
1347 return true;
Antonios Motakis0029a8d2014-10-13 14:06:18 +01001348 case IOMMU_CAP_NOEXEC:
1349 return true;
Will Deacond0948942014-06-24 17:30:10 +01001350 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001351 return false;
Will Deacond0948942014-06-24 17:30:10 +01001352 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001353}
Will Deacon45ae7cf2013-06-24 18:31:25 +01001354
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001355static
1356struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
Robin Murphy021bb842016-09-14 15:26:46 +01001357{
Suzuki K Poulose67843bb2019-07-23 23:18:34 +01001358 struct device *dev = driver_find_device_by_fwnode(&arm_smmu_driver.driver,
1359 fwnode);
Robin Murphy021bb842016-09-14 15:26:46 +01001360 put_device(dev);
1361 return dev ? dev_get_drvdata(dev) : NULL;
1362}
1363
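/*
 * Hook a new device up to its SMMU: resolve the owning SMMU instance via
 * either the legacy "mmu-masters" or the generic firmware binding, check
 * every stream ID and mask against the hardware limits, then allocate the
 * per-master cfg and claim stream mapping entries for the device.
 */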
Will Deacon03edb222015-01-19 14:27:33 +00001364static int arm_smmu_add_device(struct device *dev)
1365{
Robin Murphyadfec2e2016-09-12 17:13:55 +01001366 struct arm_smmu_device *smmu;
Robin Murphyf80cd882016-09-14 15:21:39 +01001367 struct arm_smmu_master_cfg *cfg;
Joerg Roedel9b468f72018-11-29 14:01:00 +01001368 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphyf80cd882016-09-14 15:21:39 +01001369 int i, ret;
1370
Robin Murphy021bb842016-09-14 15:26:46 +01001371 if (using_legacy_binding) {
1372 ret = arm_smmu_register_legacy_master(dev, &smmu);
Artem Savkova7990c62017-08-08 12:26:02 +02001373
1374 /*
1375		 * If dev->iommu_fwspec is initially NULL, arm_smmu_register_legacy_master()
1376 * will allocate/initialise a new one. Thus we need to update fwspec for
1377 * later use.
1378 */
Joerg Roedel9b468f72018-11-29 14:01:00 +01001379 fwspec = dev_iommu_fwspec_get(dev);
Robin Murphy021bb842016-09-14 15:26:46 +01001380 if (ret)
1381 goto out_free;
Robin Murphy3c117b52016-11-02 17:31:32 +00001382 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001383 smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
Robin Murphy021bb842016-09-14 15:26:46 +01001384 } else {
1385 return -ENODEV;
1386 }
Robin Murphyf80cd882016-09-14 15:21:39 +01001387
1388 ret = -EINVAL;
Robin Murphyadfec2e2016-09-12 17:13:55 +01001389 for (i = 0; i < fwspec->num_ids; i++) {
Robin Murphy0caf5f42019-08-15 19:37:23 +01001390 u16 sid = FIELD_GET(SMR_ID, fwspec->ids[i]);
1391 u16 mask = FIELD_GET(SMR_MASK, fwspec->ids[i]);
Robin Murphyf80cd882016-09-14 15:21:39 +01001392
Robin Murphyadfec2e2016-09-12 17:13:55 +01001393 if (sid & ~smmu->streamid_mask) {
Robin Murphyf80cd882016-09-14 15:21:39 +01001394 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
Robin Murphy021bb842016-09-14 15:26:46 +01001395 sid, smmu->streamid_mask);
1396 goto out_free;
1397 }
1398 if (mask & ~smmu->smr_mask_mask) {
1399 dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
Peng Fan6323f472017-04-21 17:03:36 +08001400 mask, smmu->smr_mask_mask);
Robin Murphyf80cd882016-09-14 15:21:39 +01001401 goto out_free;
1402 }
Robin Murphyf80cd882016-09-14 15:21:39 +01001403 }
Will Deacon03edb222015-01-19 14:27:33 +00001404
Robin Murphyadfec2e2016-09-12 17:13:55 +01001405 ret = -ENOMEM;
1406 cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
1407 GFP_KERNEL);
1408 if (!cfg)
1409 goto out_free;
1410
1411 cfg->smmu = smmu;
1412 fwspec->iommu_priv = cfg;
1413 while (i--)
1414 cfg->smendx[i] = INVALID_SMENDX;
1415
Sricharan Rd4a44f02018-12-04 11:52:10 +05301416 ret = arm_smmu_rpm_get(smmu);
1417 if (ret < 0)
1418 goto out_cfg_free;
1419
Robin Murphy588888a2016-09-12 17:13:54 +01001420 ret = arm_smmu_master_alloc_smes(dev);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301421 arm_smmu_rpm_put(smmu);
1422
Robin Murphyadfec2e2016-09-12 17:13:55 +01001423 if (ret)
Vivek Gautamc54451a2017-07-06 15:07:00 +05301424 goto out_cfg_free;
Robin Murphyadfec2e2016-09-12 17:13:55 +01001425
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001426 iommu_device_link(&smmu->iommu, dev);
1427
Sricharan R655e3642018-12-04 11:52:11 +05301428 device_link_add(dev, smmu->dev,
1429 DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER);
1430
Robin Murphyadfec2e2016-09-12 17:13:55 +01001431 return 0;
Robin Murphyf80cd882016-09-14 15:21:39 +01001432
Vivek Gautamc54451a2017-07-06 15:07:00 +05301433out_cfg_free:
1434 kfree(cfg);
Robin Murphyf80cd882016-09-14 15:21:39 +01001435out_free:
Robin Murphyadfec2e2016-09-12 17:13:55 +01001436 iommu_fwspec_free(dev);
Robin Murphyf80cd882016-09-14 15:21:39 +01001437 return ret;
Will Deacon03edb222015-01-19 14:27:33 +00001438}
1439
Will Deacon45ae7cf2013-06-24 18:31:25 +01001440static void arm_smmu_remove_device(struct device *dev)
1441{
Joerg Roedel9b468f72018-11-29 14:01:00 +01001442 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001443 struct arm_smmu_master_cfg *cfg;
1444 struct arm_smmu_device *smmu;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301445 int ret;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001446
Robin Murphyadfec2e2016-09-12 17:13:55 +01001447 if (!fwspec || fwspec->ops != &arm_smmu_ops)
Robin Murphyf80cd882016-09-14 15:21:39 +01001448 return;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001449
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001450 cfg = fwspec->iommu_priv;
1451 smmu = cfg->smmu;
1452
Sricharan Rd4a44f02018-12-04 11:52:10 +05301453 ret = arm_smmu_rpm_get(smmu);
1454 if (ret < 0)
1455 return;
1456
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001457 iommu_device_unlink(&smmu->iommu, dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001458 arm_smmu_master_free_smes(fwspec);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301459
1460 arm_smmu_rpm_put(smmu);
1461
Antonios Motakis5fc63a72013-10-18 16:08:29 +01001462 iommu_group_remove_device(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001463 kfree(fwspec->iommu_priv);
1464 iommu_fwspec_free(dev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001465}
1466
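/*
 * All stream mapping entries used by a device must resolve to the same
 * IOMMU group: reuse an existing group if any of the device's entries
 * already has one, otherwise fall back to the standard per-bus group
 * allocation.
 */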
Joerg Roedelaf659932015-10-21 23:51:41 +02001467static struct iommu_group *arm_smmu_device_group(struct device *dev)
1468{
Joerg Roedel9b468f72018-11-29 14:01:00 +01001469 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001470 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
Robin Murphy588888a2016-09-12 17:13:54 +01001471 struct iommu_group *group = NULL;
1472 int i, idx;
1473
Robin Murphyadfec2e2016-09-12 17:13:55 +01001474 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy588888a2016-09-12 17:13:54 +01001475 if (group && smmu->s2crs[idx].group &&
1476 group != smmu->s2crs[idx].group)
1477 return ERR_PTR(-EINVAL);
1478
1479 group = smmu->s2crs[idx].group;
1480 }
1481
1482 if (group)
Robin Murphye1b44cb2016-11-11 17:59:22 +00001483 return iommu_group_ref_get(group);
Joerg Roedelaf659932015-10-21 23:51:41 +02001484
1485 if (dev_is_pci(dev))
1486 group = pci_device_group(dev);
Nipun Guptaeab03e22018-09-10 19:19:18 +05301487 else if (dev_is_fsl_mc(dev))
1488 group = fsl_mc_device_group(dev);
Joerg Roedelaf659932015-10-21 23:51:41 +02001489 else
1490 group = generic_device_group(dev);
1491
Joerg Roedelaf659932015-10-21 23:51:41 +02001492 return group;
1493}
1494
Will Deaconc752ce42014-06-25 22:46:31 +01001495static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
1496 enum iommu_attr attr, void *data)
1497{
Joerg Roedel1d672632015-03-26 13:43:10 +01001498 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001499
Robin Murphy44f68762018-09-20 17:10:27 +01001500 switch(domain->type) {
1501 case IOMMU_DOMAIN_UNMANAGED:
1502 switch (attr) {
1503 case DOMAIN_ATTR_NESTING:
1504 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
1505 return 0;
1506 default:
1507 return -ENODEV;
1508 }
1509 break;
1510 case IOMMU_DOMAIN_DMA:
1511 switch (attr) {
1512 case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
1513 *(int *)data = smmu_domain->non_strict;
1514 return 0;
1515 default:
1516 return -ENODEV;
1517 }
1518 break;
Will Deaconc752ce42014-06-25 22:46:31 +01001519 default:
Robin Murphy44f68762018-09-20 17:10:27 +01001520 return -EINVAL;
Will Deaconc752ce42014-06-25 22:46:31 +01001521 }
1522}
1523
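/*
 * Note that DOMAIN_ATTR_NESTING can only be changed while the domain is not
 * yet attached (smmu_domain->smmu still NULL), since the chosen stage is
 * baked into the context bank configuration. An illustrative caller would
 * do something along the lines of:
 *
 *	int nesting = 1;
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_NESTING, &nesting);
 *
 * before the first attach.
 */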
1524static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
1525 enum iommu_attr attr, void *data)
1526{
Will Deacon518f7132014-11-14 17:17:54 +00001527 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01001528 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001529
Will Deacon518f7132014-11-14 17:17:54 +00001530 mutex_lock(&smmu_domain->init_mutex);
1531
Robin Murphy44f68762018-09-20 17:10:27 +01001532 switch(domain->type) {
1533 case IOMMU_DOMAIN_UNMANAGED:
1534 switch (attr) {
1535 case DOMAIN_ATTR_NESTING:
1536 if (smmu_domain->smmu) {
1537 ret = -EPERM;
1538 goto out_unlock;
1539 }
1540
1541 if (*(int *)data)
1542 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1543 else
1544 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1545 break;
1546 default:
1547 ret = -ENODEV;
Will Deacon518f7132014-11-14 17:17:54 +00001548 }
Robin Murphy44f68762018-09-20 17:10:27 +01001549 break;
1550 case IOMMU_DOMAIN_DMA:
1551 switch (attr) {
1552 case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
1553 smmu_domain->non_strict = *(int *)data;
1554 break;
1555 default:
1556 ret = -ENODEV;
1557 }
Will Deacon518f7132014-11-14 17:17:54 +00001558 break;
Will Deaconc752ce42014-06-25 22:46:31 +01001559 default:
Robin Murphy44f68762018-09-20 17:10:27 +01001560 ret = -EINVAL;
Will Deaconc752ce42014-06-25 22:46:31 +01001561 }
Will Deacon518f7132014-11-14 17:17:54 +00001562out_unlock:
1563 mutex_unlock(&smmu_domain->init_mutex);
1564 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01001565}
1566
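/*
 * Translate "iommus" specifier cells into a firmware ID: cell 0 carries the
 * stream ID and an optional cell 1 the SMR mask, which may alternatively be
 * supplied via a "stream-match-mask" property on the SMMU node. A consumer
 * node might contain something like (values purely illustrative):
 *
 *	iommus = <&smmu 0x400 0x3f>;
 */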
Robin Murphy021bb842016-09-14 15:26:46 +01001567static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
1568{
Robin Murphy56fbf602017-03-31 12:03:33 +01001569 u32 mask, fwid = 0;
Robin Murphy021bb842016-09-14 15:26:46 +01001570
1571 if (args->args_count > 0)
Robin Murphy0caf5f42019-08-15 19:37:23 +01001572 fwid |= FIELD_PREP(SMR_ID, args->args[0]);
Robin Murphy021bb842016-09-14 15:26:46 +01001573
1574 if (args->args_count > 1)
Robin Murphy0caf5f42019-08-15 19:37:23 +01001575 fwid |= FIELD_PREP(SMR_MASK, args->args[1]);
Robin Murphy56fbf602017-03-31 12:03:33 +01001576 else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
Robin Murphy0caf5f42019-08-15 19:37:23 +01001577 fwid |= FIELD_PREP(SMR_MASK, mask);
Robin Murphy021bb842016-09-14 15:26:46 +01001578
1579 return iommu_fwspec_add_ids(dev, &fwid, 1);
1580}
1581
Eric Augerf3ebee82017-01-19 20:57:55 +00001582static void arm_smmu_get_resv_regions(struct device *dev,
1583 struct list_head *head)
1584{
1585 struct iommu_resv_region *region;
1586 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
1587
1588 region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
Robin Murphy9d3a4de2017-03-16 17:00:16 +00001589 prot, IOMMU_RESV_SW_MSI);
Eric Augerf3ebee82017-01-19 20:57:55 +00001590 if (!region)
1591 return;
1592
1593 list_add_tail(&region->list, head);
Robin Murphy273df962017-03-16 17:00:19 +00001594
1595 iommu_dma_get_resv_regions(dev, head);
Eric Augerf3ebee82017-01-19 20:57:55 +00001596}
1597
1598static void arm_smmu_put_resv_regions(struct device *dev,
1599 struct list_head *head)
1600{
1601 struct iommu_resv_region *entry, *next;
1602
1603 list_for_each_entry_safe(entry, next, head, list)
1604 kfree(entry);
1605}
1606
Will Deacon518f7132014-11-14 17:17:54 +00001607static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01001608 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01001609 .domain_alloc = arm_smmu_domain_alloc,
1610 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01001611 .attach_dev = arm_smmu_attach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01001612 .map = arm_smmu_map,
1613 .unmap = arm_smmu_unmap,
Robin Murphy44f68762018-09-20 17:10:27 +01001614 .flush_iotlb_all = arm_smmu_flush_iotlb_all,
Robin Murphy32b12442017-09-28 15:55:01 +01001615 .iotlb_sync = arm_smmu_iotlb_sync,
Will Deaconc752ce42014-06-25 22:46:31 +01001616 .iova_to_phys = arm_smmu_iova_to_phys,
1617 .add_device = arm_smmu_add_device,
1618 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02001619 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01001620 .domain_get_attr = arm_smmu_domain_get_attr,
1621 .domain_set_attr = arm_smmu_domain_set_attr,
Robin Murphy021bb842016-09-14 15:26:46 +01001622 .of_xlate = arm_smmu_of_xlate,
Eric Augerf3ebee82017-01-19 20:57:55 +00001623 .get_resv_regions = arm_smmu_get_resv_regions,
1624 .put_resv_regions = arm_smmu_put_resv_regions,
Will Deacon518f7132014-11-14 17:17:54 +00001625 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Will Deaconcd221bd2019-12-19 12:03:51 +00001626 .owner = THIS_MODULE,
Will Deacon45ae7cf2013-06-24 18:31:25 +01001627};
1628
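/*
 * Bring the SMMU to a known state: clear the global fault status, rewrite
 * all stream mapping entries and context banks from their software shadows,
 * invalidate the TLBs, let the implementation apply any reset quirks of its
 * own, and finally program sCR0 with fault reporting enabled and unmatched
 * streams handled according to the disable_bypass policy.
 */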
1629static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
1630{
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001631 int i;
Robin Murphy62b993a2019-08-15 19:37:36 +01001632 u32 reg;
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001633
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001634 /* clear global FSR */
Robin Murphy00320ce2019-08-15 19:37:31 +01001635 reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
1636 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, reg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001637
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001638 /*
1639 * Reset stream mapping groups: Initial values mark all SMRn as
1640 * invalid and all S2CRn as bypass unless overridden.
1641 */
Robin Murphy8e8b2032016-09-12 17:13:50 +01001642 for (i = 0; i < smmu->num_mapping_groups; ++i)
1643 arm_smmu_write_sme(smmu, i);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001644
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001645 /* Make sure all context banks are disabled and clear CB_FSR */
1646 for (i = 0; i < smmu->num_context_banks; ++i) {
Robin Murphy90df3732017-08-08 14:56:14 +01001647 arm_smmu_write_context_bank(smmu, i);
Robin Murphy19713fd2019-08-15 19:37:30 +01001648 arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_FSR, FSR_FAULT);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001649 }
Will Deacon1463fe42013-07-31 19:21:27 +01001650
Will Deacon45ae7cf2013-06-24 18:31:25 +01001651 /* Invalidate the TLB, just in case */
Robin Murphy00320ce2019-08-15 19:37:31 +01001652 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLH, QCOM_DUMMY_VAL);
1653 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLNSNH, QCOM_DUMMY_VAL);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001654
Robin Murphy00320ce2019-08-15 19:37:31 +01001655 reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001656
Will Deacon45ae7cf2013-06-24 18:31:25 +01001657 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001658 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001659
1660 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001661 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001662
Robin Murphy25a1c962016-02-10 14:25:33 +00001663 /* Enable client access, handling unmatched streams as appropriate */
1664 reg &= ~sCR0_CLIENTPD;
1665 if (disable_bypass)
1666 reg |= sCR0_USFCFG;
1667 else
1668 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001669
1670 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001671 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001672
1673 /* Don't upgrade barriers */
Robin Murphy0caf5f42019-08-15 19:37:23 +01001674 reg &= ~(sCR0_BSU);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001675
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001676 if (smmu->features & ARM_SMMU_FEAT_VMID16)
1677 reg |= sCR0_VMID16EN;
1678
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001679 if (smmu->features & ARM_SMMU_FEAT_EXIDS)
1680 reg |= sCR0_EXIDENABLE;
1681
Robin Murphy62b993a2019-08-15 19:37:36 +01001682 if (smmu->impl && smmu->impl->reset)
1683 smmu->impl->reset(smmu);
1684
Will Deacon45ae7cf2013-06-24 18:31:25 +01001685 /* Push the button */
Robin Murphy11febfc2017-03-30 17:56:31 +01001686 arm_smmu_tlb_sync_global(smmu);
Robin Murphy00320ce2019-08-15 19:37:31 +01001687 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, reg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001688}
1689
1690static int arm_smmu_id_size_to_bits(int size)
1691{
1692 switch (size) {
1693 case 0:
1694 return 32;
1695 case 1:
1696 return 36;
1697 case 2:
1698 return 40;
1699 case 3:
1700 return 42;
1701 case 4:
1702 return 44;
1703 case 5:
1704 default:
1705 return 48;
1706 }
1707}
1708
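/*
 * Probe the ID registers: ID0 describes the supported translation stages
 * and the stream mapping table, ID1 gives the page and context bank
 * geometry, and ID2 provides the input/output address sizes and the
 * supported page table formats.
 */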
1709static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1710{
Robin Murphy490325e2019-08-15 19:37:26 +01001711 unsigned int size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001712 u32 id;
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001713 bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001714 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001715
1716 dev_notice(smmu->dev, "probing hardware configuration...\n");
Robin Murphyb7862e32016-04-13 18:13:03 +01001717 dev_notice(smmu->dev, "SMMUv%d with:\n",
1718 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001719
1720 /* ID0 */
Robin Murphy00320ce2019-08-15 19:37:31 +01001721 id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01001722
1723 /* Restrict available stages based on module parameter */
1724 if (force_stage == 1)
1725 id &= ~(ID0_S2TS | ID0_NTS);
1726 else if (force_stage == 2)
1727 id &= ~(ID0_S1TS | ID0_NTS);
1728
Will Deacon45ae7cf2013-06-24 18:31:25 +01001729 if (id & ID0_S1TS) {
1730 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
1731 dev_notice(smmu->dev, "\tstage 1 translation\n");
1732 }
1733
1734 if (id & ID0_S2TS) {
1735 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
1736 dev_notice(smmu->dev, "\tstage 2 translation\n");
1737 }
1738
1739 if (id & ID0_NTS) {
1740 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
1741 dev_notice(smmu->dev, "\tnested translation\n");
1742 }
1743
1744 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01001745 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001746 dev_err(smmu->dev, "\tno translation support!\n");
1747 return -ENODEV;
1748 }
1749
Robin Murphyb7862e32016-04-13 18:13:03 +01001750 if ((id & ID0_S1TS) &&
1751 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001752 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
1753 dev_notice(smmu->dev, "\taddress translation ops\n");
1754 }
1755
Robin Murphybae2c2d2015-07-29 19:46:05 +01001756 /*
1757 * In order for DMA API calls to work properly, we must defer to what
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001758 * the FW says about coherency, regardless of what the hardware claims.
Robin Murphybae2c2d2015-07-29 19:46:05 +01001759 * Fortunately, this also opens up a workaround for systems where the
1760 * ID register value has ended up configured incorrectly.
1761 */
Robin Murphybae2c2d2015-07-29 19:46:05 +01001762 cttw_reg = !!(id & ID0_CTTW);
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001763 if (cttw_fw || cttw_reg)
Robin Murphybae2c2d2015-07-29 19:46:05 +01001764 dev_notice(smmu->dev, "\t%scoherent table walk\n",
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001765 cttw_fw ? "" : "non-");
1766 if (cttw_fw != cttw_reg)
Robin Murphybae2c2d2015-07-29 19:46:05 +01001767 dev_notice(smmu->dev,
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001768 "\t(IDR0.CTTW overridden by FW configuration)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01001769
Robin Murphy21174242016-09-12 17:13:48 +01001770 /* Max. number of entries we have for stream matching/indexing */
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001771 if (smmu->version == ARM_SMMU_V2 && id & ID0_EXIDS) {
1772 smmu->features |= ARM_SMMU_FEAT_EXIDS;
1773 size = 1 << 16;
1774 } else {
Robin Murphy0caf5f42019-08-15 19:37:23 +01001775 size = 1 << FIELD_GET(ID0_NUMSIDB, id);
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001776 }
Robin Murphy21174242016-09-12 17:13:48 +01001777 smmu->streamid_mask = size - 1;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001778 if (id & ID0_SMS) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001779 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
Robin Murphy0caf5f42019-08-15 19:37:23 +01001780 size = FIELD_GET(ID0_NUMSMRG, id);
Robin Murphy21174242016-09-12 17:13:48 +01001781 if (size == 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001782 dev_err(smmu->dev,
1783 "stream-matching supported, but no SMRs present!\n");
1784 return -ENODEV;
1785 }
1786
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001787 /* Zero-initialised to mark as invalid */
1788 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
1789 GFP_KERNEL);
1790 if (!smmu->smrs)
1791 return -ENOMEM;
1792
Will Deacon45ae7cf2013-06-24 18:31:25 +01001793 dev_notice(smmu->dev,
Robin Murphy490325e2019-08-15 19:37:26 +01001794			   "\tstream matching with %u register groups\n", size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001795 }
Robin Murphy8e8b2032016-09-12 17:13:50 +01001796 /* s2cr->type == 0 means translation, so initialise explicitly */
1797 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
1798 GFP_KERNEL);
1799 if (!smmu->s2crs)
1800 return -ENOMEM;
1801 for (i = 0; i < size; i++)
1802 smmu->s2crs[i] = s2cr_init_val;
1803
Robin Murphy21174242016-09-12 17:13:48 +01001804 smmu->num_mapping_groups = size;
Robin Murphy588888a2016-09-12 17:13:54 +01001805 mutex_init(&smmu->stream_map_mutex);
Will Deacon8e517e72017-07-06 15:55:48 +01001806 spin_lock_init(&smmu->global_sync_lock);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001807
Robin Murphy7602b872016-04-28 17:12:09 +01001808 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
1809 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
1810 if (!(id & ID0_PTFS_NO_AARCH32S))
1811 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
1812 }
1813
Will Deacon45ae7cf2013-06-24 18:31:25 +01001814 /* ID1 */
Robin Murphy00320ce2019-08-15 19:37:31 +01001815 id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01001816 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001817
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01001818 /* Check for size mismatch of SMMU address space from mapped region */
Robin Murphy0caf5f42019-08-15 19:37:23 +01001819 size = 1 << (FIELD_GET(ID1_NUMPAGENDXB, id) + 1);
Robin Murphy490325e2019-08-15 19:37:26 +01001820 if (smmu->numpage != 2 * size << smmu->pgshift)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001821 dev_warn(smmu->dev,
Robin Murphy490325e2019-08-15 19:37:26 +01001822 "SMMU address space size (0x%x) differs from mapped region size (0x%x)!\n",
1823 2 * size << smmu->pgshift, smmu->numpage);
1824 /* Now properly encode NUMPAGE to subsequently derive SMMU_CB_BASE */
1825 smmu->numpage = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001826
Robin Murphy0caf5f42019-08-15 19:37:23 +01001827 smmu->num_s2_context_banks = FIELD_GET(ID1_NUMS2CB, id);
1828 smmu->num_context_banks = FIELD_GET(ID1_NUMCB, id);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001829 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
1830 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
1831 return -ENODEV;
1832 }
1833 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
1834 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphy90df3732017-08-08 14:56:14 +01001835 smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks,
1836 sizeof(*smmu->cbs), GFP_KERNEL);
1837 if (!smmu->cbs)
1838 return -ENOMEM;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001839
1840 /* ID2 */
Robin Murphy00320ce2019-08-15 19:37:31 +01001841 id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID2);
Robin Murphy0caf5f42019-08-15 19:37:23 +01001842 size = arm_smmu_id_size_to_bits(FIELD_GET(ID2_IAS, id));
Will Deacon518f7132014-11-14 17:17:54 +00001843 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001844
Will Deacon518f7132014-11-14 17:17:54 +00001845 /* The output mask is also applied for bypass */
Robin Murphy0caf5f42019-08-15 19:37:23 +01001846 size = arm_smmu_id_size_to_bits(FIELD_GET(ID2_OAS, id));
Will Deacon518f7132014-11-14 17:17:54 +00001847 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001848
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001849 if (id & ID2_VMID16)
1850 smmu->features |= ARM_SMMU_FEAT_VMID16;
1851
Robin Murphyf1d84542015-03-04 16:41:05 +00001852 /*
1853 * What the page table walker can address actually depends on which
1854 * descriptor format is in use, but since a) we don't know that yet,
1855 * and b) it can vary per context bank, this will have to do...
1856 */
1857 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
1858 dev_warn(smmu->dev,
1859 "failed to set DMA mask for table walker\n");
1860
Robin Murphyb7862e32016-04-13 18:13:03 +01001861 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00001862 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01001863 if (smmu->version == ARM_SMMU_V1_64K)
1864 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001865 } else {
Robin Murphy0caf5f42019-08-15 19:37:23 +01001866 size = FIELD_GET(ID2_UBS, id);
Will Deacon518f7132014-11-14 17:17:54 +00001867 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00001868 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01001869 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00001870 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01001871 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00001872 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01001873 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001874 }
1875
Robin Murphy7602b872016-04-28 17:12:09 +01001876 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01001877 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01001878 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01001879 if (smmu->features &
1880 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01001881 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01001882 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01001883 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01001884 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01001885 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01001886
Robin Murphyd5466352016-05-09 17:20:09 +01001887 if (arm_smmu_ops.pgsize_bitmap == -1UL)
1888 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
1889 else
1890 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
1891 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
1892 smmu->pgsize_bitmap);
1893
Will Deacon518f7132014-11-14 17:17:54 +00001894
Will Deacon28d60072014-09-01 16:24:48 +01001895 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
1896 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
Will Deacon518f7132014-11-14 17:17:54 +00001897 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01001898
1899 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
1900 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
Will Deacon518f7132014-11-14 17:17:54 +00001901 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01001902
Robin Murphy3995e182019-08-15 19:37:35 +01001903 if (smmu->impl && smmu->impl->cfg_probe)
1904 return smmu->impl->cfg_probe(smmu);
1905
Will Deacon45ae7cf2013-06-24 18:31:25 +01001906 return 0;
1907}
1908
Robin Murphy67b65a32016-04-13 18:12:57 +01001909struct arm_smmu_match_data {
1910 enum arm_smmu_arch_version version;
1911 enum arm_smmu_implementation model;
1912};
1913
1914#define ARM_SMMU_MATCH_DATA(name, ver, imp) \
Sricharan R96a299d2018-12-04 11:52:09 +05301915static const struct arm_smmu_match_data name = { .version = ver, .model = imp }
Robin Murphy67b65a32016-04-13 18:12:57 +01001916
1917ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
1918ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
Robin Murphyb7862e32016-04-13 18:13:03 +01001919ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01001920ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
Robin Murphye086d912016-04-13 18:12:58 +01001921ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
Vivek Gautam89cddc52018-12-04 11:52:13 +05301922ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2);
Robin Murphy67b65a32016-04-13 18:12:57 +01001923
Joerg Roedel09b52692014-10-02 12:24:45 +02001924static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01001925 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
1926 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
1927 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01001928 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01001929 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01001930 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Vivek Gautam89cddc52018-12-04 11:52:13 +05301931 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
Robin Murphy09360402014-08-28 17:51:59 +01001932 { },
1933};
Will Deaconb06c0762019-12-19 12:03:45 +00001934MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
Robin Murphy09360402014-08-28 17:51:59 +01001935
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00001936#ifdef CONFIG_ACPI
1937static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
1938{
1939 int ret = 0;
1940
1941 switch (model) {
1942 case ACPI_IORT_SMMU_V1:
1943 case ACPI_IORT_SMMU_CORELINK_MMU400:
1944 smmu->version = ARM_SMMU_V1;
1945 smmu->model = GENERIC_SMMU;
1946 break;
Robin Murphy84c24372017-06-19 16:41:56 +01001947 case ACPI_IORT_SMMU_CORELINK_MMU401:
1948 smmu->version = ARM_SMMU_V1_64K;
1949 smmu->model = GENERIC_SMMU;
1950 break;
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00001951 case ACPI_IORT_SMMU_V2:
1952 smmu->version = ARM_SMMU_V2;
1953 smmu->model = GENERIC_SMMU;
1954 break;
1955 case ACPI_IORT_SMMU_CORELINK_MMU500:
1956 smmu->version = ARM_SMMU_V2;
1957 smmu->model = ARM_MMU500;
1958 break;
Robin Murphy84c24372017-06-19 16:41:56 +01001959 case ACPI_IORT_SMMU_CAVIUM_THUNDERX:
1960 smmu->version = ARM_SMMU_V2;
1961 smmu->model = CAVIUM_SMMUV2;
1962 break;
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00001963 default:
1964 ret = -ENODEV;
1965 }
1966
1967 return ret;
1968}
1969
1970static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
1971 struct arm_smmu_device *smmu)
1972{
1973 struct device *dev = smmu->dev;
1974 struct acpi_iort_node *node =
1975 *(struct acpi_iort_node **)dev_get_platdata(dev);
1976 struct acpi_iort_smmu *iort_smmu;
1977 int ret;
1978
1979 /* Retrieve SMMU1/2 specific data */
1980 iort_smmu = (struct acpi_iort_smmu *)node->node_data;
1981
1982 ret = acpi_smmu_get_data(iort_smmu->model, smmu);
1983 if (ret < 0)
1984 return ret;
1985
1986 /* Ignore the configuration access interrupt */
1987 smmu->num_global_irqs = 1;
1988
1989 if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
1990 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
1991
1992 return 0;
1993}
1994#else
1995static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
1996 struct arm_smmu_device *smmu)
1997{
1998 return -ENODEV;
1999}
2000#endif
2001
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002002static int arm_smmu_device_dt_probe(struct platform_device *pdev,
2003 struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002004{
Robin Murphy67b65a32016-04-13 18:12:57 +01002005 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002006 struct device *dev = &pdev->dev;
Robin Murphy021bb842016-09-14 15:26:46 +01002007 bool legacy_binding;
2008
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002009 if (of_property_read_u32(dev->of_node, "#global-interrupts",
2010 &smmu->num_global_irqs)) {
2011 dev_err(dev, "missing #global-interrupts property\n");
2012 return -ENODEV;
2013 }
2014
2015 data = of_device_get_match_data(dev);
2016 smmu->version = data->version;
2017 smmu->model = data->model;
2018
Robin Murphy021bb842016-09-14 15:26:46 +01002019 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
2020 if (legacy_binding && !using_generic_binding) {
Will Deaconcd221bd2019-12-19 12:03:51 +00002021 if (!using_legacy_binding) {
2022 pr_notice("deprecated \"mmu-masters\" DT property in use; %s support unavailable\n",
2023 IS_ENABLED(CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS) ? "DMA API" : "SMMU");
2024 }
Robin Murphy021bb842016-09-14 15:26:46 +01002025 using_legacy_binding = true;
2026 } else if (!legacy_binding && !using_legacy_binding) {
2027 using_generic_binding = true;
2028 } else {
2029 dev_err(dev, "not probing due to mismatched DT properties\n");
2030 return -ENODEV;
2031 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002032
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002033 if (of_dma_is_coherent(dev->of_node))
2034 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
2035
2036 return 0;
2037}
2038
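/*
 * Install the given ops on every bus type that does not already have an
 * IOMMU registered, unwinding the buses already claimed if a later
 * bus_set_iommu() call fails.
 */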
Will Deacon73595722019-12-19 12:03:50 +00002039static int arm_smmu_bus_init(struct iommu_ops *ops)
Robin Murphyf6810c12017-04-10 16:51:05 +05302040{
Will Deacon73595722019-12-19 12:03:50 +00002041 int err;
2042
Robin Murphyf6810c12017-04-10 16:51:05 +05302043 /* Oh, for a proper bus abstraction */
Will Deacon73595722019-12-19 12:03:50 +00002044 if (!iommu_present(&platform_bus_type)) {
2045 err = bus_set_iommu(&platform_bus_type, ops);
2046 if (err)
2047 return err;
2048 }
Robin Murphyf6810c12017-04-10 16:51:05 +05302049#ifdef CONFIG_ARM_AMBA
Will Deacon73595722019-12-19 12:03:50 +00002050 if (!iommu_present(&amba_bustype)) {
2051 err = bus_set_iommu(&amba_bustype, ops);
2052 if (err)
2053 goto err_reset_platform_ops;
2054 }
Robin Murphyf6810c12017-04-10 16:51:05 +05302055#endif
2056#ifdef CONFIG_PCI
2057 if (!iommu_present(&pci_bus_type)) {
Will Deacon73595722019-12-19 12:03:50 +00002058 err = bus_set_iommu(&pci_bus_type, ops);
2059 if (err)
2060 goto err_reset_amba_ops;
Robin Murphyf6810c12017-04-10 16:51:05 +05302061 }
2062#endif
Nipun Guptaeab03e22018-09-10 19:19:18 +05302063#ifdef CONFIG_FSL_MC_BUS
Will Deacon73595722019-12-19 12:03:50 +00002064 if (!iommu_present(&fsl_mc_bus_type)) {
2065 err = bus_set_iommu(&fsl_mc_bus_type, ops);
2066 if (err)
2067 goto err_reset_pci_ops;
2068 }
Nipun Guptaeab03e22018-09-10 19:19:18 +05302069#endif
Will Deacon73595722019-12-19 12:03:50 +00002070 return 0;
2071
2072err_reset_pci_ops: __maybe_unused;
2073#ifdef CONFIG_PCI
2074 bus_set_iommu(&pci_bus_type, NULL);
2075#endif
2076err_reset_amba_ops: __maybe_unused;
2077#ifdef CONFIG_ARM_AMBA
2078 bus_set_iommu(&amba_bustype, NULL);
2079#endif
2080err_reset_platform_ops: __maybe_unused;
2081 bus_set_iommu(&platform_bus_type, NULL);
2082 return err;
Robin Murphyf6810c12017-04-10 16:51:05 +05302083}
2084
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002085static int arm_smmu_device_probe(struct platform_device *pdev)
2086{
2087 struct resource *res;
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002088 resource_size_t ioaddr;
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002089 struct arm_smmu_device *smmu;
2090 struct device *dev = &pdev->dev;
2091 int num_irqs, i, err;
2092
Will Deacon45ae7cf2013-06-24 18:31:25 +01002093 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
2094 if (!smmu) {
2095 dev_err(dev, "failed to allocate arm_smmu_device\n");
2096 return -ENOMEM;
2097 }
2098 smmu->dev = dev;
2099
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002100 if (dev->of_node)
2101 err = arm_smmu_device_dt_probe(pdev, smmu);
2102 else
2103 err = arm_smmu_device_acpi_probe(pdev, smmu);
2104
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002105 if (err)
2106 return err;
Robin Murphy09360402014-08-28 17:51:59 +01002107
Robin Murphyfc058d32019-08-15 19:37:33 +01002108 smmu = arm_smmu_impl_init(smmu);
2109 if (IS_ERR(smmu))
2110 return PTR_ERR(smmu);
2111
Will Deacon45ae7cf2013-06-24 18:31:25 +01002112 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002113 ioaddr = res->start;
Julia Lawall8a7f4312013-08-19 12:20:37 +01002114 smmu->base = devm_ioremap_resource(dev, res);
2115 if (IS_ERR(smmu->base))
2116 return PTR_ERR(smmu->base);
Robin Murphy490325e2019-08-15 19:37:26 +01002117 /*
2118 * The resource size should effectively match the value of SMMU_TOP;
2119 * stash that temporarily until we know PAGESIZE to validate it with.
2120 */
2121 smmu->numpage = resource_size(res);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002122
Will Deacon45ae7cf2013-06-24 18:31:25 +01002123 num_irqs = 0;
2124 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
2125 num_irqs++;
2126 if (num_irqs > smmu->num_global_irqs)
2127 smmu->num_context_irqs++;
2128 }
2129
Andreas Herrmann44a08de2013-10-01 13:39:07 +01002130 if (!smmu->num_context_irqs) {
2131 dev_err(dev, "found %d interrupts but expected at least %d\n",
2132 num_irqs, smmu->num_global_irqs + 1);
2133 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002134 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002135
Kees Cooka86854d2018-06-12 14:07:58 -07002136 smmu->irqs = devm_kcalloc(dev, num_irqs, sizeof(*smmu->irqs),
Will Deacon45ae7cf2013-06-24 18:31:25 +01002137 GFP_KERNEL);
2138 if (!smmu->irqs) {
2139 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
2140 return -ENOMEM;
2141 }
2142
2143 for (i = 0; i < num_irqs; ++i) {
2144 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07002145
Jean-Philippe Brucker34d1b082019-11-11 12:17:21 +01002146 if (irq < 0)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002147 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002148 smmu->irqs[i] = irq;
2149 }
2150
Sricharan R96a299d2018-12-04 11:52:09 +05302151 err = devm_clk_bulk_get_all(dev, &smmu->clks);
2152 if (err < 0) {
2153 dev_err(dev, "failed to get clocks %d\n", err);
2154 return err;
2155 }
2156 smmu->num_clks = err;
2157
2158 err = clk_bulk_prepare_enable(smmu->num_clks, smmu->clks);
2159 if (err)
2160 return err;
2161
Olav Haugan3c8766d2014-08-22 17:12:32 -07002162 err = arm_smmu_device_cfg_probe(smmu);
2163 if (err)
2164 return err;
2165
Vivek Gautamd1e20222018-07-19 23:23:56 +05302166 if (smmu->version == ARM_SMMU_V2) {
2167 if (smmu->num_context_banks > smmu->num_context_irqs) {
2168 dev_err(dev,
2169 "found only %d context irq(s) but %d required\n",
2170 smmu->num_context_irqs, smmu->num_context_banks);
2171 return -ENODEV;
2172 }
2173
2174 /* Ignore superfluous interrupts */
2175 smmu->num_context_irqs = smmu->num_context_banks;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002176 }
2177
Will Deacon45ae7cf2013-06-24 18:31:25 +01002178 for (i = 0; i < smmu->num_global_irqs; ++i) {
Peng Fanbee14002016-07-04 17:38:22 +08002179 err = devm_request_irq(smmu->dev, smmu->irqs[i],
2180 arm_smmu_global_fault,
2181 IRQF_SHARED,
2182 "arm-smmu global fault",
2183 smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002184 if (err) {
2185 dev_err(dev, "failed to request global IRQ %d (%u)\n",
2186 i, smmu->irqs[i]);
Robin Murphyf80cd882016-09-14 15:21:39 +01002187 return err;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002188 }
2189 }
2190
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002191 err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
2192 "smmu.%pa", &ioaddr);
2193 if (err) {
2194 dev_err(dev, "Failed to register iommu in sysfs\n");
2195 return err;
2196 }
2197
2198 iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
2199 iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
2200
2201 err = iommu_device_register(&smmu->iommu);
2202 if (err) {
2203 dev_err(dev, "Failed to register iommu\n");
2204 return err;
2205 }
2206
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002207 platform_set_drvdata(pdev, smmu);
Will Deaconfd90cec2013-08-21 13:56:34 +01002208 arm_smmu_device_reset(smmu);
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03002209 arm_smmu_test_smr_masks(smmu);
Robin Murphy021bb842016-09-14 15:26:46 +01002210
Robin Murphyf6810c12017-04-10 16:51:05 +05302211 /*
Sricharan Rd4a44f02018-12-04 11:52:10 +05302212 * We want to avoid touching dev->power.lock in fastpaths unless
2213 * it's really going to do something useful - pm_runtime_enabled()
2214 * can serve as an ideal proxy for that decision. So, conditionally
2215 * enable pm_runtime.
2216 */
2217 if (dev->pm_domain) {
2218 pm_runtime_set_active(dev);
2219 pm_runtime_enable(dev);
2220 }
2221
2222 /*
Robin Murphyf6810c12017-04-10 16:51:05 +05302223 * For ACPI and generic DT bindings, an SMMU will be probed before
2224 * any device which might need it, so we want the bus ops in place
2225 * ready to handle default domain setup as soon as any SMMU exists.
2226 */
2227 if (!using_legacy_binding)
Will Deacon73595722019-12-19 12:03:50 +00002228 return arm_smmu_bus_init(&arm_smmu_ops);
Robin Murphyf6810c12017-04-10 16:51:05 +05302229
Will Deacon45ae7cf2013-06-24 18:31:25 +01002230 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002231}
2232
Will Deaconb06c0762019-12-19 12:03:45 +00002233static int arm_smmu_device_remove(struct platform_device *pdev)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002234{
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002235 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002236
2237 if (!smmu)
Will Deaconb06c0762019-12-19 12:03:45 +00002238 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002239
Will Deaconecfadb62013-07-31 19:21:28 +01002240 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002241 dev_err(&pdev->dev, "removing device with active domains!\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01002242
Will Deacon73595722019-12-19 12:03:50 +00002243 arm_smmu_bus_init(NULL);
2244 iommu_device_unregister(&smmu->iommu);
2245 iommu_device_sysfs_remove(&smmu->iommu);
2246
Sricharan Rd4a44f02018-12-04 11:52:10 +05302247 arm_smmu_rpm_get(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002248 /* Turn the thing off */
Robin Murphy00320ce2019-08-15 19:37:31 +01002249 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, sCR0_CLIENTPD);
Sricharan Rd4a44f02018-12-04 11:52:10 +05302250 arm_smmu_rpm_put(smmu);
Sricharan R96a299d2018-12-04 11:52:09 +05302251
Sricharan Rd4a44f02018-12-04 11:52:10 +05302252 if (pm_runtime_enabled(smmu->dev))
2253 pm_runtime_force_suspend(smmu->dev);
2254 else
2255 clk_bulk_disable(smmu->num_clks, smmu->clks);
2256
2257 clk_bulk_unprepare(smmu->num_clks, smmu->clks);
Will Deaconb06c0762019-12-19 12:03:45 +00002258 return 0;
2259}
2260
2261static void arm_smmu_device_shutdown(struct platform_device *pdev)
2262{
2263 arm_smmu_device_remove(pdev);
Nate Watterson7aa86192017-06-29 18:18:15 -04002264}
2265
Sricharan R96a299d2018-12-04 11:52:09 +05302266static int __maybe_unused arm_smmu_runtime_resume(struct device *dev)
Robin Murphya2d866f2017-08-08 14:56:15 +01002267{
2268 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
Sricharan R96a299d2018-12-04 11:52:09 +05302269 int ret;
2270
2271 ret = clk_bulk_enable(smmu->num_clks, smmu->clks);
2272 if (ret)
2273 return ret;
Robin Murphya2d866f2017-08-08 14:56:15 +01002274
2275 arm_smmu_device_reset(smmu);
Sricharan R96a299d2018-12-04 11:52:09 +05302276
Will Deacon45ae7cf2013-06-24 18:31:25 +01002277 return 0;
2278}
2279
Sricharan R96a299d2018-12-04 11:52:09 +05302280static int __maybe_unused arm_smmu_runtime_suspend(struct device *dev)
Dan Carpenter6614ee72013-08-21 09:34:20 +01002281{
Sricharan R96a299d2018-12-04 11:52:09 +05302282 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
2283
2284 clk_bulk_disable(smmu->num_clks, smmu->clks);
2285
2286 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002287}
2288
Robin Murphya2d866f2017-08-08 14:56:15 +01002289static int __maybe_unused arm_smmu_pm_resume(struct device *dev)
2290{
Sricharan R96a299d2018-12-04 11:52:09 +05302291 if (pm_runtime_suspended(dev))
2292 return 0;
Robin Murphya2d866f2017-08-08 14:56:15 +01002293
Sricharan R96a299d2018-12-04 11:52:09 +05302294 return arm_smmu_runtime_resume(dev);
Robin Murphya2d866f2017-08-08 14:56:15 +01002295}
2296
Sricharan R96a299d2018-12-04 11:52:09 +05302297static int __maybe_unused arm_smmu_pm_suspend(struct device *dev)
2298{
2299 if (pm_runtime_suspended(dev))
2300 return 0;
2301
2302 return arm_smmu_runtime_suspend(dev);
2303}
2304
2305static const struct dev_pm_ops arm_smmu_pm_ops = {
2306 SET_SYSTEM_SLEEP_PM_OPS(arm_smmu_pm_suspend, arm_smmu_pm_resume)
2307 SET_RUNTIME_PM_OPS(arm_smmu_runtime_suspend,
2308 arm_smmu_runtime_resume, NULL)
2309};
Robin Murphya2d866f2017-08-08 14:56:15 +01002310
Will Deacon45ae7cf2013-06-24 18:31:25 +01002311static struct platform_driver arm_smmu_driver = {
2312 .driver = {
Will Deacon34debdc2019-12-19 12:03:46 +00002313 .name = "arm-smmu",
2314 .of_match_table = of_match_ptr(arm_smmu_of_match),
2315 .pm = &arm_smmu_pm_ops,
2316 .suppress_bind_attrs = true,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002317 },
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002318 .probe = arm_smmu_device_probe,
Will Deaconb06c0762019-12-19 12:03:45 +00002319 .remove = arm_smmu_device_remove,
Nate Watterson7aa86192017-06-29 18:18:15 -04002320 .shutdown = arm_smmu_device_shutdown,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002321};
Will Deaconb06c0762019-12-19 12:03:45 +00002322module_platform_driver(arm_smmu_driver);
2323
2324MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
2325MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
Ard Biesheuveld3daf662019-12-19 12:03:48 +00002326MODULE_ALIAS("platform:arm-smmu");
Will Deaconb06c0762019-12-19 12:03:45 +00002327MODULE_LICENSE("GPL v2");