// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 *	- Extended Stream ID (16 bit)
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/ratelimit.h>
#include <linux/slab.h>

#include <linux/amba/bus.h>
#include <linux/fsl/mc.h>

#include "arm-smmu.h"

/*
 * Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU
 * global register space are still, in fact, using a hypervisor to mediate it
 * by trapping and emulating register accesses. Sadly, some deployed versions
 * of said trapping code have bugs wherein they go horribly wrong for stores
 * using r31 (i.e. XZR/WZR) as the source register.
 */
#define QCOM_DUMMY_VAL -1

#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */
#define TLB_SPIN_COUNT			10

#define MSI_IOVA_BASE			0x8000000
#define MSI_IOVA_LENGTH			0x100000

static int force_stage;
/*
 * not really modular, but the easiest way to keep compat with existing
 * bootargs behaviour is to continue using module_param() here.
 */
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass =
	IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT);
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

struct arm_smmu_s2cr {
	struct iommu_group		*group;
	int				count;
	enum arm_smmu_s2cr_type		type;
	enum arm_smmu_s2cr_privcfg	privcfg;
	u8				cbndx;
};

#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
}

struct arm_smmu_smr {
	u16				mask;
	u16				id;
	bool				valid;
};

struct arm_smmu_cb {
	u64				ttbr[2];
	u32				tcr[2];
	u32				mair[2];
	struct arm_smmu_cfg		*cfg;
};

struct arm_smmu_master_cfg {
	struct arm_smmu_device		*smmu;
	s16				smendx[];
};
#define INVALID_SMENDX			-1
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
#define fwspec_smendx(fw, i) \
	(i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
#define for_each_cfg_sme(fw, i, idx) \
	for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)

static bool using_legacy_binding, using_generic_binding;

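/*
 * Runtime PM wrappers: when runtime PM is not enabled for the SMMU device,
 * these collapse to no-ops so callers can use them unconditionally around
 * register accesses.
 */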
static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
{
	if (pm_runtime_enabled(smmu->dev))
		return pm_runtime_get_sync(smmu->dev);

	return 0;
}

static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
{
	if (pm_runtime_enabled(smmu->dev))
		pm_runtime_put_autosuspend(smmu->dev);
}

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}

static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", -1)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}

static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;

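/*
 * Legacy DT support: the old "mmu-masters" binding describes masters as
 * phandle+ID lists on the SMMU node itself, rather than via per-master
 * "iommus" properties. Walk every registered SMMU instance looking for one
 * that references this device, then translate its Stream IDs into a generic
 * iommu_fwspec so the rest of the driver can treat both bindings identically.
 */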
static int arm_smmu_register_legacy_master(struct device *dev,
					   struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err;

	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}

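/*
 * Atomically claim a free index in [start, end) of the given bitmap (used
 * here for context bank allocation from smmu->context_map); returns -ENOSPC
 * when the range is exhausted.
 */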
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
				int sync, int status)
{
	unsigned int spin_cnt, delay;
	u32 reg;

	if (smmu->impl && unlikely(smmu->impl->tlb_sync))
		return smmu->impl->tlb_sync(smmu, page, sync, status);

	arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL);
	for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
		for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
			reg = arm_smmu_readl(smmu, page, status);
			if (!(reg & sTLBGSTATUS_GSACTIVE))
				return;
			cpu_relax();
		}
		udelay(delay);
	}
	dev_err_ratelimited(smmu->dev,
			    "TLB sync timed out -- SMMU may be deadlocked\n");
}

static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
{
	unsigned long flags;

	spin_lock_irqsave(&smmu->global_sync_lock, flags);
	__arm_smmu_tlb_sync(smmu, ARM_SMMU_GR0, ARM_SMMU_GR0_sTLBGSYNC,
			    ARM_SMMU_GR0_sTLBGSTATUS);
	spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
}

static void arm_smmu_tlb_sync_context(struct arm_smmu_domain *smmu_domain)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	unsigned long flags;

	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
	__arm_smmu_tlb_sync(smmu, ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx),
			    ARM_SMMU_CB_TLBSYNC, ARM_SMMU_CB_TLBSTATUS);
	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
}

static void arm_smmu_tlb_inv_context_s1(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	/*
	 * The TLBI write may be relaxed, so ensure that PTEs cleared by the
	 * current CPU are visible beforehand.
	 */
	wmb();
	arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx,
			  ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid);
	arm_smmu_tlb_sync_context(smmu_domain);
}

static void arm_smmu_tlb_inv_context_s2(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	/* See above */
	wmb();
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
	arm_smmu_tlb_sync_global(smmu);
}

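/*
 * By-VA invalidation: for AArch64-format contexts the IOVA (shifted down by
 * 12) is issued as a 64-bit write with the ASID in bits [63:48], while for
 * AArch32 formats the page-aligned IOVA carries the ASID in its low bits and
 * is issued as a 32-bit write per granule.
 */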
static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size,
				      size_t granule, void *cookie, int reg)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	int idx = cfg->cbndx;

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
		iova = (iova >> 12) << 12;
		iova |= cfg->asid;
		do {
			arm_smmu_cb_write(smmu, idx, reg, iova);
			iova += granule;
		} while (size -= granule);
	} else {
		iova >>= 12;
		iova |= (u64)cfg->asid << 48;
		do {
			arm_smmu_cb_writeq(smmu, idx, reg, iova);
			iova += granule >> 12;
		} while (size -= granule);
	}
}

static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
				      size_t granule, void *cookie, int reg)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	int idx = smmu_domain->cfg.cbndx;

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	iova >>= 12;
	do {
		if (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64)
			arm_smmu_cb_writeq(smmu, idx, reg, iova);
		else
			arm_smmu_cb_write(smmu, idx, reg, iova);
		iova += granule >> 12;
	} while (size -= granule);
}

static void arm_smmu_tlb_inv_walk_s1(unsigned long iova, size_t size,
				     size_t granule, void *cookie)
{
	arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie,
				  ARM_SMMU_CB_S1_TLBIVA);
	arm_smmu_tlb_sync_context(cookie);
}

static void arm_smmu_tlb_inv_leaf_s1(unsigned long iova, size_t size,
				     size_t granule, void *cookie)
{
	arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie,
				  ARM_SMMU_CB_S1_TLBIVAL);
	arm_smmu_tlb_sync_context(cookie);
}

static void arm_smmu_tlb_add_page_s1(struct iommu_iotlb_gather *gather,
				     unsigned long iova, size_t granule,
				     void *cookie)
{
	arm_smmu_tlb_inv_range_s1(iova, granule, granule, cookie,
				  ARM_SMMU_CB_S1_TLBIVAL);
}

static void arm_smmu_tlb_inv_walk_s2(unsigned long iova, size_t size,
				     size_t granule, void *cookie)
{
	arm_smmu_tlb_inv_range_s2(iova, size, granule, cookie,
				  ARM_SMMU_CB_S2_TLBIIPAS2);
	arm_smmu_tlb_sync_context(cookie);
}

static void arm_smmu_tlb_inv_leaf_s2(unsigned long iova, size_t size,
				     size_t granule, void *cookie)
{
	arm_smmu_tlb_inv_range_s2(iova, size, granule, cookie,
				  ARM_SMMU_CB_S2_TLBIIPAS2L);
	arm_smmu_tlb_sync_context(cookie);
}

static void arm_smmu_tlb_add_page_s2(struct iommu_iotlb_gather *gather,
				     unsigned long iova, size_t granule,
				     void *cookie)
{
	arm_smmu_tlb_inv_range_s2(iova, granule, granule, cookie,
				  ARM_SMMU_CB_S2_TLBIIPAS2L);
}

static void arm_smmu_tlb_inv_any_s2_v1(unsigned long iova, size_t size,
				       size_t granule, void *cookie)
{
	arm_smmu_tlb_inv_context_s2(cookie);
}
/*
 * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
 * almost negligible, but the benefit of getting the first one in as far ahead
 * of the sync as possible is significant, hence we don't just make this a
 * no-op and call arm_smmu_tlb_inv_context_s2() from .iotlb_sync as you might
 * think.
 */
static void arm_smmu_tlb_add_page_s2_v1(struct iommu_iotlb_gather *gather,
					unsigned long iova, size_t granule,
					void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
}

static const struct iommu_flush_ops arm_smmu_s1_tlb_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s1,
	.tlb_flush_walk	= arm_smmu_tlb_inv_walk_s1,
	.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf_s1,
	.tlb_add_page	= arm_smmu_tlb_add_page_s1,
};

static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
	.tlb_flush_walk	= arm_smmu_tlb_inv_walk_s2,
	.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf_s2,
	.tlb_add_page	= arm_smmu_tlb_add_page_s2,
};

static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
	.tlb_flush_walk	= arm_smmu_tlb_inv_any_s2_v1,
	.tlb_flush_leaf	= arm_smmu_tlb_inv_any_s2_v1,
	.tlb_add_page	= arm_smmu_tlb_add_page_s2_v1,
};

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	u32 fsr, fsynr, cbfrsynra;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	int idx = smmu_domain->cfg.cbndx;

	fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0);
	iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);
	cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));

	dev_err_ratelimited(smmu->dev,
	"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
			    fsr, iova, fsynr, cbfrsynra, idx);

	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	gfsr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
	gfsynr0 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	if (__ratelimit(&rs)) {
		if (IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT) &&
		    (gfsr & sGFSR_USF))
			dev_err(smmu->dev,
				"Blocked unknown Stream ID 0x%hx; boot with \"arm-smmu.disable_bypass=0\" to allow, but this may have security implications\n",
				(u16)gfsynr1);
		else
			dev_err(smmu->dev,
				"Unexpected global fault, this could be serious\n");
		dev_err(smmu->dev,
			"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
			gfsr, gfsynr0, gfsynr1, gfsynr2);
	}

	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, gfsr);
	return IRQ_HANDLED;
}

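/*
 * Context bank programming is split in two: arm_smmu_init_context_bank()
 * computes the TCR/TTBR/MAIR values from the io-pgtable configuration and
 * stashes them in the software arm_smmu_cb shadow, while
 * arm_smmu_write_context_bank() pushes that shadow state (or a disabling
 * SCTLR for unassigned banks) out to the hardware registers.
 */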
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	cb->cfg = cfg;

	/* TCR */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr;
		} else {
			cb->tcr[0] = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
			cb->tcr[1] = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			cb->tcr[1] |= FIELD_PREP(TCR2_SEP, TCR2_SEP_UPSTREAM);
			if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
				cb->tcr[1] |= TCR2_AS;
		}
	} else {
		cb->tcr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	}

	/* TTBRs */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
			cb->ttbr[1] = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
		} else {
			cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
			cb->ttbr[0] |= FIELD_PREP(TTBRn_ASID, cfg->asid);
			cb->ttbr[1] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
			cb->ttbr[1] |= FIELD_PREP(TTBRn_ASID, cfg->asid);
		}
	} else {
		cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr;
			cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr;
		} else {
			cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair;
			cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair >> 32;
		}
	}
}

static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
{
	u32 reg;
	bool stage1;
	struct arm_smmu_cb *cb = &smmu->cbs[idx];
	struct arm_smmu_cfg *cfg = cb->cfg;

	/* Unassigned context banks only need disabling */
	if (!cfg) {
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, 0);
		return;
	}

	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	/* CBA2R */
	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_VA64;
		else
			reg = 0;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= FIELD_PREP(CBA2R_VMID16, cfg->vmid);

		arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBA2R(idx), reg);
	}

	/* CBAR */
	reg = FIELD_PREP(CBAR_TYPE, cfg->cbar);
	if (smmu->version < ARM_SMMU_V2)
		reg |= FIELD_PREP(CBAR_IRPTNDX, cfg->irptndx);

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= FIELD_PREP(CBAR_S1_BPSHCFG, CBAR_S1_BPSHCFG_NSH) |
			FIELD_PREP(CBAR_S1_MEMATTR, CBAR_S1_MEMATTR_WB);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= FIELD_PREP(CBAR_VMID, cfg->vmid);
	}
	arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(idx), reg);

	/*
	 * TCR
	 * We must write this before the TTBRs, since it determines the
	 * access behaviour of some fields (in particular, ASID[15:8]).
	 */
	if (stage1 && smmu->version > ARM_SMMU_V1)
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR2, cb->tcr[1]);
	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR, cb->tcr[0]);

	/* TTBRs */
	if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_CONTEXTIDR, cfg->asid);
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR1, cb->ttbr[1]);
	} else {
		arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
		if (stage1)
			arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR1,
					   cb->ttbr[1]);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR0, cb->mair[0]);
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR1, cb->mair[1]);
	}

	/* SCTLR */
	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= SCTLR_E;

	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
}

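/*
 * Finalise a domain on a particular SMMU instance: pick a translation stage
 * and context format, allocate a context bank and IRQ index, allocate the
 * io-pgtable, program the bank, and finally request the context fault
 * interrupt.
 */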
static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
		smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N              S1
	 *     S1             S1+S2            S1
	 *     S1               S2             S2
	 *     S1               S1             S1
	 *     N                N              N
	 *     N              S1+S2            S2
	 *     N                S2             S2
	 *     N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
	    !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
	    (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		} else {
			fmt = ARM_V7S;
			ias = min(ias, 32UL);
			oas = min(oas, 32UL);
		}
		smmu_domain->flush_ops = &arm_smmu_s1_tlb_ops;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		if (smmu->version == ARM_SMMU_V2)
			smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v2;
		else
			smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v1;
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}
	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (ret < 0)
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
		cfg->vmid = cfg->cbndx + 1;
	else
		cfg->asid = cfg->cbndx;

	smmu_domain->smmu = smmu;
	if (smmu->impl && smmu->impl->init_context) {
		ret = smmu->impl->init_context(smmu_domain);
		if (ret)
			goto out_unlock;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.coherent_walk	= smmu->features & ARM_SMMU_FEAT_COHERENT_WALK,
		.tlb		= smmu_domain->flush_ops,
		.iommu_dev	= smmu->dev,
	};

	if (smmu_domain->non_strict)
		pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;

	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1UL << ias) - 1;
	domain->geometry.force_aperture = true;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
			       IRQF_SHARED, "arm-smmu-context-fault", domain);
	if (ret < 0) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	int ret, irq;

	if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
		return;

	ret = arm_smmu_rpm_get(smmu);
	if (ret < 0)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	smmu->cbs[cfg->cbndx].cfg = NULL;
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);

	arm_smmu_rpm_put(smmu);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED &&
	    type != IOMMU_DOMAIN_DMA &&
	    type != IOMMU_DOMAIN_IDENTITY)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
	    iommu_get_dma_cookie(&smmu_domain->domain))) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->cb_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_smr *smr = smmu->smrs + idx;
	u32 reg = FIELD_PREP(SMR_ID, smr->id) | FIELD_PREP(SMR_MASK, smr->mask);

	if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
		reg |= SMR_VALID;
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(idx), reg);
}

static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
	u32 reg = FIELD_PREP(S2CR_TYPE, s2cr->type) |
		  FIELD_PREP(S2CR_CBNDX, s2cr->cbndx) |
		  FIELD_PREP(S2CR_PRIVCFG, s2cr->privcfg);

	if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
	    smmu->smrs[idx].valid)
		reg |= S2CR_EXIDVALID;
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
}

static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
{
	arm_smmu_write_s2cr(smmu, idx);
	if (smmu->smrs)
		arm_smmu_write_smr(smmu, idx);
}

/*
 * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
 * should be called after sCR0 is written.
 */
static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
{
	u32 smr;

	if (!smmu->smrs)
		return;

	/*
	 * SMR.ID bits may not be preserved if the corresponding MASK
	 * bits are set, so check each one separately. We can reject
	 * masters later if they try to claim IDs outside these masks.
	 */
	smr = FIELD_PREP(SMR_ID, smmu->streamid_mask);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(0), smr);
	smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(0));
	smmu->streamid_mask = FIELD_GET(SMR_ID, smr);

	smr = FIELD_PREP(SMR_MASK, smmu->streamid_mask);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(0), smr);
	smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(0));
	smmu->smr_mask_mask = FIELD_GET(SMR_MASK, smr);
}

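/*
 * Find a stream map entry for the given ID/mask pair. With stream indexing
 * the Stream ID is simply used as the index; with stream matching we either
 * reuse an SMR that entirely covers the new entry or claim the first free
 * one. As a worked example of the masking arithmetic below: an existing SMR
 * of {id = 0x400, mask = 0x7f} matches Stream IDs 0x400-0x47f, so a new
 * entry of {id = 0x420, mask = 0x1f} lies entirely within it and shares its
 * index, whereas {id = 0x400, mask = 0xff} (0x400-0x4ff) only partially
 * overlaps it and is rejected as a potential conflict.
 */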
static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
{
	struct arm_smmu_smr *smrs = smmu->smrs;
	int i, free_idx = -ENOSPC;

	/* Stream indexing is blissfully easy */
	if (!smrs)
		return id;

	/* Validating SMRs is... less so */
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		if (!smrs[i].valid) {
			/*
			 * Note the first free entry we come across, which
			 * we'll claim in the end if nothing else matches.
			 */
			if (free_idx < 0)
				free_idx = i;
			continue;
		}
		/*
		 * If the new entry is _entirely_ matched by an existing entry,
		 * then reuse that, with the guarantee that there also cannot
		 * be any subsequent conflicting entries. In normal use we'd
		 * expect simply identical entries for this case, but there's
		 * no harm in accommodating the generalisation.
		 */
		if ((mask & smrs[i].mask) == mask &&
		    !((id ^ smrs[i].id) & ~smrs[i].mask))
			return i;
		/*
		 * If the new entry has any other overlap with an existing one,
		 * though, then there always exists at least one stream ID
		 * which would cause a conflict, and we can't allow that risk.
		 */
		if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
			return -EINVAL;
	}

	return free_idx;
}

static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
{
	if (--smmu->s2crs[idx].count)
		return false;

	smmu->s2crs[idx] = s2cr_init_val;
	if (smmu->smrs)
		smmu->smrs[idx].valid = false;

	return true;
}

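/*
 * Allocate stream map entries for all of a master's Stream IDs. Each fwspec
 * ID encodes the SMR ID in its low 16 bits and an optional SMR mask in the
 * bits above, as extracted with FIELD_GET(SMR_ID/SMR_MASK) below.
 */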
static int arm_smmu_master_alloc_smes(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
	struct arm_smmu_device *smmu = cfg->smmu;
	struct arm_smmu_smr *smrs = smmu->smrs;
	struct iommu_group *group;
	int i, idx, ret;

	mutex_lock(&smmu->stream_map_mutex);
	/* Figure out a viable stream map entry allocation */
	for_each_cfg_sme(fwspec, i, idx) {
		u16 sid = FIELD_GET(SMR_ID, fwspec->ids[i]);
		u16 mask = FIELD_GET(SMR_MASK, fwspec->ids[i]);

		if (idx != INVALID_SMENDX) {
			ret = -EEXIST;
			goto out_err;
		}

		ret = arm_smmu_find_sme(smmu, sid, mask);
		if (ret < 0)
			goto out_err;

		idx = ret;
		if (smrs && smmu->s2crs[idx].count == 0) {
			smrs[idx].id = sid;
			smrs[idx].mask = mask;
			smrs[idx].valid = true;
		}
		smmu->s2crs[idx].count++;
		cfg->smendx[i] = (s16)idx;
	}

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto out_err;
	}
	iommu_group_put(group);

	/* It worked! Now, poke the actual hardware */
	for_each_cfg_sme(fwspec, i, idx) {
		arm_smmu_write_sme(smmu, idx);
		smmu->s2crs[idx].group = group;
	}

	mutex_unlock(&smmu->stream_map_mutex);
	return 0;

out_err:
	while (i--) {
		arm_smmu_free_sme(smmu, cfg->smendx[i]);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
	return ret;
}

static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
{
	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
	int i, idx;

	mutex_lock(&smmu->stream_map_mutex);
	for_each_cfg_sme(fwspec, i, idx) {
		if (arm_smmu_free_sme(smmu, idx))
			arm_smmu_write_sme(smmu, idx);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
}

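/*
 * Point every one of the master's stream map entries at the domain's context
 * bank (or at bypass for an identity domain) by rewriting the relevant
 * S2CRs; entries already in the right state are left untouched.
 */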
Will Deacon45ae7cf2013-06-24 18:31:25 +01001097static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Robin Murphyadfec2e2016-09-12 17:13:55 +01001098 struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001099{
Will Deacon44680ee2014-06-25 11:29:12 +01001100 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001101 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001102 u8 cbndx = smmu_domain->cfg.cbndx;
Will Deacon61bc6712017-01-06 16:56:03 +00001103 enum arm_smmu_s2cr_type type;
Robin Murphy588888a2016-09-12 17:13:54 +01001104 int i, idx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001105
Will Deacon61bc6712017-01-06 16:56:03 +00001106 if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
1107 type = S2CR_TYPE_BYPASS;
1108 else
1109 type = S2CR_TYPE_TRANS;
1110
Robin Murphyadfec2e2016-09-12 17:13:55 +01001111 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy8e8b2032016-09-12 17:13:50 +01001112 if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
Robin Murphy588888a2016-09-12 17:13:54 +01001113 continue;
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001114
Robin Murphy8e8b2032016-09-12 17:13:50 +01001115 s2cr[idx].type = type;
Sricharan Re1989802017-01-06 18:58:15 +05301116 s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001117 s2cr[idx].cbndx = cbndx;
1118 arm_smmu_write_s2cr(smmu, idx);
Will Deacon43b412b2014-07-15 11:22:24 +01001119 }
Robin Murphy8e8b2032016-09-12 17:13:50 +01001120 return 0;
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001121}
1122
Will Deacon45ae7cf2013-06-24 18:31:25 +01001123static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1124{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001125 int ret;
Joerg Roedel9b468f72018-11-29 14:01:00 +01001126 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001127 struct arm_smmu_device *smmu;
Joerg Roedel1d672632015-03-26 13:43:10 +01001128 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001129
Robin Murphyadfec2e2016-09-12 17:13:55 +01001130 if (!fwspec || fwspec->ops != &arm_smmu_ops) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001131 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
1132 return -ENXIO;
1133 }
1134
Robin Murphyfba4f8e2016-10-17 12:06:21 +01001135 /*
1136 * FIXME: The arch/arm DMA API code tries to attach devices to its own
1137 * domains between of_xlate() and add_device() - we have no way to cope
1138 * with that, so until ARM gets converted to rely on groups and default
1139 * domains, just say no (but more politely than by dereferencing NULL).
1140 * This should be at least a WARN_ON once that's sorted.
1141 */
1142 if (!fwspec->iommu_priv)
1143 return -ENODEV;
1144
Robin Murphyadfec2e2016-09-12 17:13:55 +01001145 smmu = fwspec_smmu(fwspec);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301146
1147 ret = arm_smmu_rpm_get(smmu);
1148 if (ret < 0)
1149 return ret;
1150
Will Deacon518f7132014-11-14 17:17:54 +00001151 /* Ensure that the domain is finalised */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001152 ret = arm_smmu_init_domain_context(domain, smmu);
Arnd Bergmann287980e2016-05-27 23:23:25 +02001153 if (ret < 0)
Sricharan Rd4a44f02018-12-04 11:52:10 +05301154 goto rpm_put;
Will Deacon518f7132014-11-14 17:17:54 +00001155
Will Deacon45ae7cf2013-06-24 18:31:25 +01001156 /*
Will Deacon44680ee2014-06-25 11:29:12 +01001157 * Sanity check the domain. We don't support domains across
1158 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01001159 */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001160 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001161 dev_err(dev,
1162 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Robin Murphyadfec2e2016-09-12 17:13:55 +01001163 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
Sricharan Rd4a44f02018-12-04 11:52:10 +05301164 ret = -EINVAL;
1165 goto rpm_put;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001166 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001167
1168 /* Looks ok, so add the device to the domain */
Sricharan Rd4a44f02018-12-04 11:52:10 +05301169 ret = arm_smmu_domain_add_master(smmu_domain, fwspec);
1170
Rob Clarkee9bdfe2019-10-31 14:31:02 -07001171 /*
1172	 * Set up an autosuspend delay to avoid bouncing runpm state.
1173 * Otherwise, if a driver for a suspended consumer device
1174 * unmaps buffers, it will runpm resume/suspend for each one.
1175 *
1176 * For example, when used by a GPU device, when an application
1177 * or game exits, it can trigger unmapping 100s or 1000s of
1178 * buffers. With a runpm cycle for each buffer, that adds up
1179 * to 5-10sec worth of reprogramming the context bank, while
1180 * the system appears to be locked up to the user.
1181 */
1182 pm_runtime_set_autosuspend_delay(smmu->dev, 20);
1183 pm_runtime_use_autosuspend(smmu->dev);
1184
Sricharan Rd4a44f02018-12-04 11:52:10 +05301185rpm_put:
1186 arm_smmu_rpm_put(smmu);
1187 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001188}
1189
Will Deacon45ae7cf2013-06-24 18:31:25 +01001190static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00001191 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001192{
Robin Murphy523d7422017-06-22 16:53:56 +01001193 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301194 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1195 int ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001196
Will Deacon518f7132014-11-14 17:17:54 +00001197 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001198 return -ENODEV;
1199
Sricharan Rd4a44f02018-12-04 11:52:10 +05301200 arm_smmu_rpm_get(smmu);
1201 ret = ops->map(ops, iova, paddr, size, prot);
1202 arm_smmu_rpm_put(smmu);
1203
1204 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001205}
1206
1207static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
Will Deacon56f8af52019-07-02 16:44:06 +01001208 size_t size, struct iommu_iotlb_gather *gather)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001209{
Robin Murphy523d7422017-06-22 16:53:56 +01001210 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301211 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1212 size_t ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001213
Will Deacon518f7132014-11-14 17:17:54 +00001214 if (!ops)
1215 return 0;
1216
Sricharan Rd4a44f02018-12-04 11:52:10 +05301217 arm_smmu_rpm_get(smmu);
Will Deacona2d3a382019-07-02 16:44:58 +01001218 ret = ops->unmap(ops, iova, size, gather);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301219 arm_smmu_rpm_put(smmu);
1220
1221 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001222}
1223
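/*
 * Invalidate all TLB entries for the domain, with a runtime PM reference
 * held so that the SMMU registers are accessible.
 */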
Robin Murphy44f68762018-09-20 17:10:27 +01001224static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
1225{
1226 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301227 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy44f68762018-09-20 17:10:27 +01001228
Will Deaconabfd6fe2019-07-02 16:44:41 +01001229 if (smmu_domain->flush_ops) {
Sricharan Rd4a44f02018-12-04 11:52:10 +05301230 arm_smmu_rpm_get(smmu);
Robin Murphy696bcfb2019-09-18 17:17:51 +01001231 smmu_domain->flush_ops->tlb_flush_all(smmu_domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301232 arm_smmu_rpm_put(smmu);
1233 }
Robin Murphy44f68762018-09-20 17:10:27 +01001234}
1235
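/*
 * Wait for outstanding TLB invalidations to complete: SMMUv2, or stage-1
 * domains on any version, can sync at context-bank granularity; anything
 * else falls back to the global sync register.
 */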
Will Deacon56f8af52019-07-02 16:44:06 +01001236static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
1237 struct iommu_iotlb_gather *gather)
Robin Murphy32b12442017-09-28 15:55:01 +01001238{
1239 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301240 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy32b12442017-09-28 15:55:01 +01001241
Robin Murphyae2b60f2019-09-18 17:17:50 +01001242 if (!smmu)
1243 return;
1244
1245 arm_smmu_rpm_get(smmu);
1246 if (smmu->version == ARM_SMMU_V2 ||
1247 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
1248 arm_smmu_tlb_sync_context(smmu_domain);
1249 else
1250 arm_smmu_tlb_sync_global(smmu);
1251 arm_smmu_rpm_put(smmu);
Robin Murphy32b12442017-09-28 15:55:01 +01001252}
1253
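/*
 * Resolve an IOVA using the hardware's ATS1PR translation operation:
 * write the page-aligned VA, poll ATSR until the walk finishes, then
 * read the result back from PAR. On timeout, fall back to a software
 * walk of the page tables.
 */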
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001254static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
1255 dma_addr_t iova)
1256{
Joerg Roedel1d672632015-03-26 13:43:10 +01001257 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001258 struct arm_smmu_device *smmu = smmu_domain->smmu;
1259 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1260	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1261 struct device *dev = smmu->dev;
Robin Murphy19713fd2019-08-15 19:37:30 +01001262 void __iomem *reg;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001263 u32 tmp;
1264 u64 phys;
Robin Murphy523d7422017-06-22 16:53:56 +01001265 unsigned long va, flags;
Robin Murphy19713fd2019-08-15 19:37:30 +01001266 int ret, idx = cfg->cbndx;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301267
1268 ret = arm_smmu_rpm_get(smmu);
1269 if (ret < 0)
1270 return 0;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001271
Robin Murphy523d7422017-06-22 16:53:56 +01001272 spin_lock_irqsave(&smmu_domain->cb_lock, flags);
Robin Murphy661d9622015-05-27 17:09:34 +01001273 va = iova & ~0xfffUL;
Robin Murphy61005762019-08-15 19:37:28 +01001274 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
Robin Murphy19713fd2019-08-15 19:37:30 +01001275 arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
Robin Murphy61005762019-08-15 19:37:28 +01001276 else
Robin Murphy19713fd2019-08-15 19:37:30 +01001277 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001278
Robin Murphy19713fd2019-08-15 19:37:30 +01001279 reg = arm_smmu_page(smmu, ARM_SMMU_CB(smmu, idx)) + ARM_SMMU_CB_ATSR;
1280 if (readl_poll_timeout_atomic(reg, tmp, !(tmp & ATSR_ACTIVE), 5, 50)) {
Robin Murphy523d7422017-06-22 16:53:56 +01001281 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001282 dev_err(dev,
Fabio Estevam077124c2015-08-18 17:12:24 +01001283 "iova to phys timed out on %pad. Falling back to software table walk.\n",
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001284 &iova);
		/* Balance the arm_smmu_rpm_get() above before bailing out */
		arm_smmu_rpm_put(smmu);
1285		return ops->iova_to_phys(ops, iova);
1286 }
1287
Robin Murphy19713fd2019-08-15 19:37:30 +01001288 phys = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_PAR);
Robin Murphy523d7422017-06-22 16:53:56 +01001289 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001290 if (phys & CB_PAR_F) {
1291 dev_err(dev, "translation fault!\n");
1292 dev_err(dev, "PAR = 0x%llx\n", phys);
		/* Balance the arm_smmu_rpm_get() above before bailing out */
		arm_smmu_rpm_put(smmu);
1293		return 0;
1294 }
1295
Sricharan Rd4a44f02018-12-04 11:52:10 +05301296 arm_smmu_rpm_put(smmu);
1297
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001298 return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
1299}
1300
Will Deacon45ae7cf2013-06-24 18:31:25 +01001301static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001302 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001303{
Joerg Roedel1d672632015-03-26 13:43:10 +01001304 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Robin Murphy523d7422017-06-22 16:53:56 +01001305 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001306
Sunil Gouthambdf95922017-04-25 15:27:52 +05301307 if (domain->type == IOMMU_DOMAIN_IDENTITY)
1308 return iova;
1309
Will Deacon518f7132014-11-14 17:17:54 +00001310 if (!ops)
Will Deacona44a97912013-11-07 18:47:50 +00001311 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001312
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001313 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
Robin Murphy523d7422017-06-22 16:53:56 +01001314 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
1315 return arm_smmu_iova_to_phys_hard(domain, iova);
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001316
Robin Murphy523d7422017-06-22 16:53:56 +01001317 return ops->iova_to_phys(ops, iova);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001318}
1319
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001320static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001321{
Will Deacond0948942014-06-24 17:30:10 +01001322 switch (cap) {
1323 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001324 /*
1325 * Return true here as the SMMU can always send out coherent
1326 * requests.
1327 */
1328 return true;
Antonios Motakis0029a8d2014-10-13 14:06:18 +01001329 case IOMMU_CAP_NOEXEC:
1330 return true;
Will Deacond0948942014-06-24 17:30:10 +01001331 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001332 return false;
Will Deacond0948942014-06-24 17:30:10 +01001333 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001334}
Will Deacon45ae7cf2013-06-24 18:31:25 +01001335
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001336static
1337struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
Robin Murphy021bb842016-09-14 15:26:46 +01001338{
Suzuki K Poulose67843bb2019-07-23 23:18:34 +01001339 struct device *dev = driver_find_device_by_fwnode(&arm_smmu_driver.driver,
1340 fwnode);
Robin Murphy021bb842016-09-14 15:26:46 +01001341 put_device(dev);
1342 return dev ? dev_get_drvdata(dev) : NULL;
1343}
1344
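/*
 * Per-device probe hook: work out which SMMU owns the master (legacy
 * "mmu-masters" binding or generic fwspec), validate its stream IDs and
 * SMR masks against the hardware limits, allocate the per-master cfg and
 * stream mapping entries, then link the device to the SMMU for sysfs and
 * runtime PM purposes.
 */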
Will Deacon03edb222015-01-19 14:27:33 +00001345static int arm_smmu_add_device(struct device *dev)
1346{
Robin Murphyadfec2e2016-09-12 17:13:55 +01001347 struct arm_smmu_device *smmu;
Robin Murphyf80cd882016-09-14 15:21:39 +01001348 struct arm_smmu_master_cfg *cfg;
Joerg Roedel9b468f72018-11-29 14:01:00 +01001349 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphyf80cd882016-09-14 15:21:39 +01001350 int i, ret;
1351
Robin Murphy021bb842016-09-14 15:26:46 +01001352 if (using_legacy_binding) {
1353 ret = arm_smmu_register_legacy_master(dev, &smmu);
Artem Savkova7990c62017-08-08 12:26:02 +02001354
1355 /*
1356		 * If dev->iommu_fwspec is initially NULL, arm_smmu_register_legacy_master()
1357 * will allocate/initialise a new one. Thus we need to update fwspec for
1358 * later use.
1359 */
Joerg Roedel9b468f72018-11-29 14:01:00 +01001360 fwspec = dev_iommu_fwspec_get(dev);
Robin Murphy021bb842016-09-14 15:26:46 +01001361 if (ret)
1362 goto out_free;
Robin Murphy3c117b52016-11-02 17:31:32 +00001363 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001364 smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
Robin Murphy021bb842016-09-14 15:26:46 +01001365 } else {
1366 return -ENODEV;
1367 }
Robin Murphyf80cd882016-09-14 15:21:39 +01001368
1369 ret = -EINVAL;
Robin Murphyadfec2e2016-09-12 17:13:55 +01001370 for (i = 0; i < fwspec->num_ids; i++) {
Robin Murphy0caf5f42019-08-15 19:37:23 +01001371 u16 sid = FIELD_GET(SMR_ID, fwspec->ids[i]);
1372 u16 mask = FIELD_GET(SMR_MASK, fwspec->ids[i]);
Robin Murphyf80cd882016-09-14 15:21:39 +01001373
Robin Murphyadfec2e2016-09-12 17:13:55 +01001374 if (sid & ~smmu->streamid_mask) {
Robin Murphyf80cd882016-09-14 15:21:39 +01001375 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
Robin Murphy021bb842016-09-14 15:26:46 +01001376 sid, smmu->streamid_mask);
1377 goto out_free;
1378 }
1379 if (mask & ~smmu->smr_mask_mask) {
1380 dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
Peng Fan6323f472017-04-21 17:03:36 +08001381 mask, smmu->smr_mask_mask);
Robin Murphyf80cd882016-09-14 15:21:39 +01001382 goto out_free;
1383 }
Robin Murphyf80cd882016-09-14 15:21:39 +01001384 }
Will Deacon03edb222015-01-19 14:27:33 +00001385
Robin Murphyadfec2e2016-09-12 17:13:55 +01001386 ret = -ENOMEM;
1387 cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
1388 GFP_KERNEL);
1389 if (!cfg)
1390 goto out_free;
1391
1392 cfg->smmu = smmu;
1393 fwspec->iommu_priv = cfg;
1394 while (i--)
1395 cfg->smendx[i] = INVALID_SMENDX;
1396
Sricharan Rd4a44f02018-12-04 11:52:10 +05301397 ret = arm_smmu_rpm_get(smmu);
1398 if (ret < 0)
1399 goto out_cfg_free;
1400
Robin Murphy588888a2016-09-12 17:13:54 +01001401 ret = arm_smmu_master_alloc_smes(dev);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301402 arm_smmu_rpm_put(smmu);
1403
Robin Murphyadfec2e2016-09-12 17:13:55 +01001404 if (ret)
Vivek Gautamc54451a2017-07-06 15:07:00 +05301405 goto out_cfg_free;
Robin Murphyadfec2e2016-09-12 17:13:55 +01001406
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001407 iommu_device_link(&smmu->iommu, dev);
1408
Sricharan R655e3642018-12-04 11:52:11 +05301409 device_link_add(dev, smmu->dev,
1410 DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER);
1411
Robin Murphyadfec2e2016-09-12 17:13:55 +01001412 return 0;
Robin Murphyf80cd882016-09-14 15:21:39 +01001413
Vivek Gautamc54451a2017-07-06 15:07:00 +05301414out_cfg_free:
1415 kfree(cfg);
Robin Murphyf80cd882016-09-14 15:21:39 +01001416out_free:
Robin Murphyadfec2e2016-09-12 17:13:55 +01001417 iommu_fwspec_free(dev);
Robin Murphyf80cd882016-09-14 15:21:39 +01001418 return ret;
Will Deacon03edb222015-01-19 14:27:33 +00001419}
1420
Will Deacon45ae7cf2013-06-24 18:31:25 +01001421static void arm_smmu_remove_device(struct device *dev)
1422{
Joerg Roedel9b468f72018-11-29 14:01:00 +01001423 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001424 struct arm_smmu_master_cfg *cfg;
1425 struct arm_smmu_device *smmu;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301426 int ret;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001427
Robin Murphyadfec2e2016-09-12 17:13:55 +01001428 if (!fwspec || fwspec->ops != &arm_smmu_ops)
Robin Murphyf80cd882016-09-14 15:21:39 +01001429 return;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001430
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001431 cfg = fwspec->iommu_priv;
1432 smmu = cfg->smmu;
1433
Sricharan Rd4a44f02018-12-04 11:52:10 +05301434 ret = arm_smmu_rpm_get(smmu);
1435 if (ret < 0)
1436 return;
1437
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001438 iommu_device_unlink(&smmu->iommu, dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001439 arm_smmu_master_free_smes(fwspec);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301440
1441 arm_smmu_rpm_put(smmu);
1442
Antonios Motakis5fc63a72013-10-18 16:08:29 +01001443 iommu_group_remove_device(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001444 kfree(fwspec->iommu_priv);
1445 iommu_fwspec_free(dev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001446}
1447
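/*
 * Masters that share a stream mapping entry must share an IOMMU group:
 * reuse any group already recorded in the matching S2CRs, otherwise fall
 * back to the standard PCI/fsl-mc/platform group allocation.
 */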
Joerg Roedelaf659932015-10-21 23:51:41 +02001448static struct iommu_group *arm_smmu_device_group(struct device *dev)
1449{
Joerg Roedel9b468f72018-11-29 14:01:00 +01001450 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001451 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
Robin Murphy588888a2016-09-12 17:13:54 +01001452 struct iommu_group *group = NULL;
1453 int i, idx;
1454
Robin Murphyadfec2e2016-09-12 17:13:55 +01001455 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy588888a2016-09-12 17:13:54 +01001456 if (group && smmu->s2crs[idx].group &&
1457 group != smmu->s2crs[idx].group)
1458 return ERR_PTR(-EINVAL);
1459
1460 group = smmu->s2crs[idx].group;
1461 }
1462
1463 if (group)
Robin Murphye1b44cb2016-11-11 17:59:22 +00001464 return iommu_group_ref_get(group);
Joerg Roedelaf659932015-10-21 23:51:41 +02001465
1466 if (dev_is_pci(dev))
1467 group = pci_device_group(dev);
Nipun Guptaeab03e22018-09-10 19:19:18 +05301468 else if (dev_is_fsl_mc(dev))
1469 group = fsl_mc_device_group(dev);
Joerg Roedelaf659932015-10-21 23:51:41 +02001470 else
1471 group = generic_device_group(dev);
1472
Joerg Roedelaf659932015-10-21 23:51:41 +02001473 return group;
1474}
1475
Will Deaconc752ce42014-06-25 22:46:31 +01001476static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
1477 enum iommu_attr attr, void *data)
1478{
Joerg Roedel1d672632015-03-26 13:43:10 +01001479 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001480
Robin Murphy44f68762018-09-20 17:10:27 +01001481	switch (domain->type) {
1482 case IOMMU_DOMAIN_UNMANAGED:
1483 switch (attr) {
1484 case DOMAIN_ATTR_NESTING:
1485 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
1486 return 0;
1487 default:
1488 return -ENODEV;
1489 }
1490 break;
1491 case IOMMU_DOMAIN_DMA:
1492 switch (attr) {
1493 case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
1494 *(int *)data = smmu_domain->non_strict;
1495 return 0;
1496 default:
1497 return -ENODEV;
1498 }
1499 break;
Will Deaconc752ce42014-06-25 22:46:31 +01001500 default:
Robin Murphy44f68762018-09-20 17:10:27 +01001501 return -EINVAL;
Will Deaconc752ce42014-06-25 22:46:31 +01001502 }
1503}
1504
1505static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
1506 enum iommu_attr attr, void *data)
1507{
Will Deacon518f7132014-11-14 17:17:54 +00001508 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01001509 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001510
Will Deacon518f7132014-11-14 17:17:54 +00001511 mutex_lock(&smmu_domain->init_mutex);
1512
Robin Murphy44f68762018-09-20 17:10:27 +01001513	switch (domain->type) {
1514 case IOMMU_DOMAIN_UNMANAGED:
1515 switch (attr) {
1516 case DOMAIN_ATTR_NESTING:
1517 if (smmu_domain->smmu) {
1518 ret = -EPERM;
1519 goto out_unlock;
1520 }
1521
1522 if (*(int *)data)
1523 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1524 else
1525 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1526 break;
1527 default:
1528 ret = -ENODEV;
Will Deacon518f7132014-11-14 17:17:54 +00001529 }
Robin Murphy44f68762018-09-20 17:10:27 +01001530 break;
1531 case IOMMU_DOMAIN_DMA:
1532 switch (attr) {
1533 case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
1534 smmu_domain->non_strict = *(int *)data;
1535 break;
1536 default:
1537 ret = -ENODEV;
1538 }
Will Deacon518f7132014-11-14 17:17:54 +00001539 break;
Will Deaconc752ce42014-06-25 22:46:31 +01001540 default:
Robin Murphy44f68762018-09-20 17:10:27 +01001541 ret = -EINVAL;
Will Deaconc752ce42014-06-25 22:46:31 +01001542 }
Will Deacon518f7132014-11-14 17:17:54 +00001543out_unlock:
1544 mutex_unlock(&smmu_domain->init_mutex);
1545 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01001546}
1547
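/*
 * Translate a generic DT binding specifier into a fwspec ID: cell 0
 * carries the stream ID and cell 1 (or the optional "stream-match-mask"
 * property) carries the SMR mask.
 */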
Robin Murphy021bb842016-09-14 15:26:46 +01001548static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
1549{
Robin Murphy56fbf602017-03-31 12:03:33 +01001550 u32 mask, fwid = 0;
Robin Murphy021bb842016-09-14 15:26:46 +01001551
1552 if (args->args_count > 0)
Robin Murphy0caf5f42019-08-15 19:37:23 +01001553 fwid |= FIELD_PREP(SMR_ID, args->args[0]);
Robin Murphy021bb842016-09-14 15:26:46 +01001554
1555 if (args->args_count > 1)
Robin Murphy0caf5f42019-08-15 19:37:23 +01001556 fwid |= FIELD_PREP(SMR_MASK, args->args[1]);
Robin Murphy56fbf602017-03-31 12:03:33 +01001557 else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
Robin Murphy0caf5f42019-08-15 19:37:23 +01001558 fwid |= FIELD_PREP(SMR_MASK, mask);
Robin Murphy021bb842016-09-14 15:26:46 +01001559
1560 return iommu_fwspec_add_ids(dev, &fwid, 1);
1561}
1562
Eric Augerf3ebee82017-01-19 20:57:55 +00001563static void arm_smmu_get_resv_regions(struct device *dev,
1564 struct list_head *head)
1565{
1566 struct iommu_resv_region *region;
1567 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
1568
1569 region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
Robin Murphy9d3a4de2017-03-16 17:00:16 +00001570 prot, IOMMU_RESV_SW_MSI);
Eric Augerf3ebee82017-01-19 20:57:55 +00001571 if (!region)
1572 return;
1573
1574 list_add_tail(&region->list, head);
Robin Murphy273df962017-03-16 17:00:19 +00001575
1576 iommu_dma_get_resv_regions(dev, head);
Eric Augerf3ebee82017-01-19 20:57:55 +00001577}
1578
1579static void arm_smmu_put_resv_regions(struct device *dev,
1580 struct list_head *head)
1581{
1582 struct iommu_resv_region *entry, *next;
1583
1584 list_for_each_entry_safe(entry, next, head, list)
1585 kfree(entry);
1586}
1587
Will Deacon518f7132014-11-14 17:17:54 +00001588static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01001589 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01001590 .domain_alloc = arm_smmu_domain_alloc,
1591 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01001592 .attach_dev = arm_smmu_attach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01001593 .map = arm_smmu_map,
1594 .unmap = arm_smmu_unmap,
Robin Murphy44f68762018-09-20 17:10:27 +01001595 .flush_iotlb_all = arm_smmu_flush_iotlb_all,
Robin Murphy32b12442017-09-28 15:55:01 +01001596 .iotlb_sync = arm_smmu_iotlb_sync,
Will Deaconc752ce42014-06-25 22:46:31 +01001597 .iova_to_phys = arm_smmu_iova_to_phys,
1598 .add_device = arm_smmu_add_device,
1599 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02001600 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01001601 .domain_get_attr = arm_smmu_domain_get_attr,
1602 .domain_set_attr = arm_smmu_domain_set_attr,
Robin Murphy021bb842016-09-14 15:26:46 +01001603 .of_xlate = arm_smmu_of_xlate,
Eric Augerf3ebee82017-01-19 20:57:55 +00001604 .get_resv_regions = arm_smmu_get_resv_regions,
1605 .put_resv_regions = arm_smmu_put_resv_regions,
Will Deacon518f7132014-11-14 17:17:54 +00001606 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001607};
1608
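/*
 * Put the SMMU into a known state: clear the global fault status, reset
 * every stream mapping group and context bank, invalidate the TLBs, run
 * any implementation-specific reset hook, then configure and enable
 * global fault reporting and client access via sCR0.
 */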
1609static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
1610{
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001611 int i;
Robin Murphy62b993a2019-08-15 19:37:36 +01001612 u32 reg;
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001613
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001614 /* clear global FSR */
Robin Murphy00320ce2019-08-15 19:37:31 +01001615 reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
1616 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, reg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001617
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001618 /*
1619 * Reset stream mapping groups: Initial values mark all SMRn as
1620 * invalid and all S2CRn as bypass unless overridden.
1621 */
Robin Murphy8e8b2032016-09-12 17:13:50 +01001622 for (i = 0; i < smmu->num_mapping_groups; ++i)
1623 arm_smmu_write_sme(smmu, i);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001624
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001625 /* Make sure all context banks are disabled and clear CB_FSR */
1626 for (i = 0; i < smmu->num_context_banks; ++i) {
Robin Murphy90df3732017-08-08 14:56:14 +01001627 arm_smmu_write_context_bank(smmu, i);
Robin Murphy19713fd2019-08-15 19:37:30 +01001628 arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_FSR, FSR_FAULT);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001629 }
Will Deacon1463fe42013-07-31 19:21:27 +01001630
Will Deacon45ae7cf2013-06-24 18:31:25 +01001631 /* Invalidate the TLB, just in case */
Robin Murphy00320ce2019-08-15 19:37:31 +01001632 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLH, QCOM_DUMMY_VAL);
1633 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLNSNH, QCOM_DUMMY_VAL);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001634
Robin Murphy00320ce2019-08-15 19:37:31 +01001635 reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001636
Will Deacon45ae7cf2013-06-24 18:31:25 +01001637 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001638 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001639
1640 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001641 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001642
Robin Murphy25a1c962016-02-10 14:25:33 +00001643 /* Enable client access, handling unmatched streams as appropriate */
1644 reg &= ~sCR0_CLIENTPD;
1645 if (disable_bypass)
1646 reg |= sCR0_USFCFG;
1647 else
1648 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001649
1650 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001651 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001652
1653 /* Don't upgrade barriers */
Robin Murphy0caf5f42019-08-15 19:37:23 +01001654 reg &= ~(sCR0_BSU);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001655
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001656 if (smmu->features & ARM_SMMU_FEAT_VMID16)
1657 reg |= sCR0_VMID16EN;
1658
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001659 if (smmu->features & ARM_SMMU_FEAT_EXIDS)
1660 reg |= sCR0_EXIDENABLE;
1661
Robin Murphy62b993a2019-08-15 19:37:36 +01001662 if (smmu->impl && smmu->impl->reset)
1663 smmu->impl->reset(smmu);
1664
Will Deacon45ae7cf2013-06-24 18:31:25 +01001665 /* Push the button */
Robin Murphy11febfc2017-03-30 17:56:31 +01001666 arm_smmu_tlb_sync_global(smmu);
Robin Murphy00320ce2019-08-15 19:37:31 +01001667 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, reg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001668}
1669
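/* Decode an address-size field from the ID registers into a bit count. */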
1670static int arm_smmu_id_size_to_bits(int size)
1671{
1672 switch (size) {
1673 case 0:
1674 return 32;
1675 case 1:
1676 return 36;
1677 case 2:
1678 return 40;
1679 case 3:
1680 return 42;
1681 case 4:
1682 return 44;
1683 case 5:
1684 default:
1685 return 48;
1686 }
1687}
1688
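/*
 * Read the ID registers to discover what the hardware supports:
 * translation stages, stream matching resources, context bank counts,
 * address sizes and page-table formats, and allocate the SMR/S2CR and
 * context-bank bookkeeping to match.
 */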
1689static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1690{
Robin Murphy490325e2019-08-15 19:37:26 +01001691 unsigned int size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001692 u32 id;
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001693 bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001694 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001695
1696 dev_notice(smmu->dev, "probing hardware configuration...\n");
Robin Murphyb7862e32016-04-13 18:13:03 +01001697 dev_notice(smmu->dev, "SMMUv%d with:\n",
1698 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001699
1700 /* ID0 */
Robin Murphy00320ce2019-08-15 19:37:31 +01001701 id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01001702
1703 /* Restrict available stages based on module parameter */
1704 if (force_stage == 1)
1705 id &= ~(ID0_S2TS | ID0_NTS);
1706 else if (force_stage == 2)
1707 id &= ~(ID0_S1TS | ID0_NTS);
1708
Will Deacon45ae7cf2013-06-24 18:31:25 +01001709 if (id & ID0_S1TS) {
1710 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
1711 dev_notice(smmu->dev, "\tstage 1 translation\n");
1712 }
1713
1714 if (id & ID0_S2TS) {
1715 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
1716 dev_notice(smmu->dev, "\tstage 2 translation\n");
1717 }
1718
1719 if (id & ID0_NTS) {
1720 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
1721 dev_notice(smmu->dev, "\tnested translation\n");
1722 }
1723
1724 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01001725 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001726 dev_err(smmu->dev, "\tno translation support!\n");
1727 return -ENODEV;
1728 }
1729
Robin Murphyb7862e32016-04-13 18:13:03 +01001730 if ((id & ID0_S1TS) &&
1731 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001732 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
1733 dev_notice(smmu->dev, "\taddress translation ops\n");
1734 }
1735
Robin Murphybae2c2d2015-07-29 19:46:05 +01001736 /*
1737 * In order for DMA API calls to work properly, we must defer to what
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001738 * the FW says about coherency, regardless of what the hardware claims.
Robin Murphybae2c2d2015-07-29 19:46:05 +01001739 * Fortunately, this also opens up a workaround for systems where the
1740 * ID register value has ended up configured incorrectly.
1741 */
Robin Murphybae2c2d2015-07-29 19:46:05 +01001742 cttw_reg = !!(id & ID0_CTTW);
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001743 if (cttw_fw || cttw_reg)
Robin Murphybae2c2d2015-07-29 19:46:05 +01001744 dev_notice(smmu->dev, "\t%scoherent table walk\n",
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001745 cttw_fw ? "" : "non-");
1746 if (cttw_fw != cttw_reg)
Robin Murphybae2c2d2015-07-29 19:46:05 +01001747 dev_notice(smmu->dev,
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001748 "\t(IDR0.CTTW overridden by FW configuration)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01001749
Robin Murphy21174242016-09-12 17:13:48 +01001750 /* Max. number of entries we have for stream matching/indexing */
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001751 if (smmu->version == ARM_SMMU_V2 && id & ID0_EXIDS) {
1752 smmu->features |= ARM_SMMU_FEAT_EXIDS;
1753 size = 1 << 16;
1754 } else {
Robin Murphy0caf5f42019-08-15 19:37:23 +01001755 size = 1 << FIELD_GET(ID0_NUMSIDB, id);
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001756 }
Robin Murphy21174242016-09-12 17:13:48 +01001757 smmu->streamid_mask = size - 1;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001758 if (id & ID0_SMS) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001759 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
Robin Murphy0caf5f42019-08-15 19:37:23 +01001760 size = FIELD_GET(ID0_NUMSMRG, id);
Robin Murphy21174242016-09-12 17:13:48 +01001761 if (size == 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001762 dev_err(smmu->dev,
1763 "stream-matching supported, but no SMRs present!\n");
1764 return -ENODEV;
1765 }
1766
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001767 /* Zero-initialised to mark as invalid */
1768 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
1769 GFP_KERNEL);
1770 if (!smmu->smrs)
1771 return -ENOMEM;
1772
Will Deacon45ae7cf2013-06-24 18:31:25 +01001773 dev_notice(smmu->dev,
Robin Murphy490325e2019-08-15 19:37:26 +01001774		   "\tstream matching with %u register groups\n", size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001775 }
Robin Murphy8e8b2032016-09-12 17:13:50 +01001776 /* s2cr->type == 0 means translation, so initialise explicitly */
1777 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
1778 GFP_KERNEL);
1779 if (!smmu->s2crs)
1780 return -ENOMEM;
1781 for (i = 0; i < size; i++)
1782 smmu->s2crs[i] = s2cr_init_val;
1783
Robin Murphy21174242016-09-12 17:13:48 +01001784 smmu->num_mapping_groups = size;
Robin Murphy588888a2016-09-12 17:13:54 +01001785 mutex_init(&smmu->stream_map_mutex);
Will Deacon8e517e72017-07-06 15:55:48 +01001786 spin_lock_init(&smmu->global_sync_lock);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001787
Robin Murphy7602b872016-04-28 17:12:09 +01001788 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
1789 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
1790 if (!(id & ID0_PTFS_NO_AARCH32S))
1791 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
1792 }
1793
Will Deacon45ae7cf2013-06-24 18:31:25 +01001794 /* ID1 */
Robin Murphy00320ce2019-08-15 19:37:31 +01001795 id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01001796 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001797
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01001798 /* Check for size mismatch of SMMU address space from mapped region */
Robin Murphy0caf5f42019-08-15 19:37:23 +01001799 size = 1 << (FIELD_GET(ID1_NUMPAGENDXB, id) + 1);
Robin Murphy490325e2019-08-15 19:37:26 +01001800 if (smmu->numpage != 2 * size << smmu->pgshift)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001801 dev_warn(smmu->dev,
Robin Murphy490325e2019-08-15 19:37:26 +01001802 "SMMU address space size (0x%x) differs from mapped region size (0x%x)!\n",
1803 2 * size << smmu->pgshift, smmu->numpage);
1804 /* Now properly encode NUMPAGE to subsequently derive SMMU_CB_BASE */
1805 smmu->numpage = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001806
Robin Murphy0caf5f42019-08-15 19:37:23 +01001807 smmu->num_s2_context_banks = FIELD_GET(ID1_NUMS2CB, id);
1808 smmu->num_context_banks = FIELD_GET(ID1_NUMCB, id);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001809 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
1810 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
1811 return -ENODEV;
1812 }
1813 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
1814 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphy90df3732017-08-08 14:56:14 +01001815 smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks,
1816 sizeof(*smmu->cbs), GFP_KERNEL);
1817 if (!smmu->cbs)
1818 return -ENOMEM;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001819
1820 /* ID2 */
Robin Murphy00320ce2019-08-15 19:37:31 +01001821 id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID2);
Robin Murphy0caf5f42019-08-15 19:37:23 +01001822 size = arm_smmu_id_size_to_bits(FIELD_GET(ID2_IAS, id));
Will Deacon518f7132014-11-14 17:17:54 +00001823 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001824
Will Deacon518f7132014-11-14 17:17:54 +00001825 /* The output mask is also applied for bypass */
Robin Murphy0caf5f42019-08-15 19:37:23 +01001826 size = arm_smmu_id_size_to_bits(FIELD_GET(ID2_OAS, id));
Will Deacon518f7132014-11-14 17:17:54 +00001827 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001828
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001829 if (id & ID2_VMID16)
1830 smmu->features |= ARM_SMMU_FEAT_VMID16;
1831
Robin Murphyf1d84542015-03-04 16:41:05 +00001832 /*
1833 * What the page table walker can address actually depends on which
1834 * descriptor format is in use, but since a) we don't know that yet,
1835 * and b) it can vary per context bank, this will have to do...
1836 */
1837 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
1838 dev_warn(smmu->dev,
1839 "failed to set DMA mask for table walker\n");
1840
Robin Murphyb7862e32016-04-13 18:13:03 +01001841 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00001842 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01001843 if (smmu->version == ARM_SMMU_V1_64K)
1844 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001845 } else {
Robin Murphy0caf5f42019-08-15 19:37:23 +01001846 size = FIELD_GET(ID2_UBS, id);
Will Deacon518f7132014-11-14 17:17:54 +00001847 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00001848 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01001849 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00001850 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01001851 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00001852 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01001853 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001854 }
1855
Robin Murphy7602b872016-04-28 17:12:09 +01001856 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01001857 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01001858 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01001859 if (smmu->features &
1860 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01001861 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01001862 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01001863 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01001864 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01001865 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01001866
Robin Murphyd5466352016-05-09 17:20:09 +01001867 if (arm_smmu_ops.pgsize_bitmap == -1UL)
1868 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
1869 else
1870 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
1871 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
1872 smmu->pgsize_bitmap);
1873
Will Deacon28d60072014-09-01 16:24:48 +01001875 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
1876 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
Will Deacon518f7132014-11-14 17:17:54 +00001877 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01001878
1879 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
1880 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
Will Deacon518f7132014-11-14 17:17:54 +00001881 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01001882
Robin Murphy3995e182019-08-15 19:37:35 +01001883 if (smmu->impl && smmu->impl->cfg_probe)
1884 return smmu->impl->cfg_probe(smmu);
1885
Will Deacon45ae7cf2013-06-24 18:31:25 +01001886 return 0;
1887}
1888
Robin Murphy67b65a32016-04-13 18:12:57 +01001889struct arm_smmu_match_data {
1890 enum arm_smmu_arch_version version;
1891 enum arm_smmu_implementation model;
1892};
1893
1894#define ARM_SMMU_MATCH_DATA(name, ver, imp) \
Sricharan R96a299d2018-12-04 11:52:09 +05301895static const struct arm_smmu_match_data name = { .version = ver, .model = imp }
Robin Murphy67b65a32016-04-13 18:12:57 +01001896
1897ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
1898ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
Robin Murphyb7862e32016-04-13 18:13:03 +01001899ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01001900ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
Robin Murphye086d912016-04-13 18:12:58 +01001901ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
Vivek Gautam89cddc52018-12-04 11:52:13 +05301902ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2);
Robin Murphy67b65a32016-04-13 18:12:57 +01001903
Joerg Roedel09b52692014-10-02 12:24:45 +02001904static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01001905 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
1906 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
1907 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01001908 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01001909 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01001910 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Vivek Gautam89cddc52018-12-04 11:52:13 +05301911 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
Robin Murphy09360402014-08-28 17:51:59 +01001912 { },
1913};
Robin Murphy09360402014-08-28 17:51:59 +01001914
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00001915#ifdef CONFIG_ACPI
1916static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
1917{
1918 int ret = 0;
1919
1920 switch (model) {
1921 case ACPI_IORT_SMMU_V1:
1922 case ACPI_IORT_SMMU_CORELINK_MMU400:
1923 smmu->version = ARM_SMMU_V1;
1924 smmu->model = GENERIC_SMMU;
1925 break;
Robin Murphy84c24372017-06-19 16:41:56 +01001926 case ACPI_IORT_SMMU_CORELINK_MMU401:
1927 smmu->version = ARM_SMMU_V1_64K;
1928 smmu->model = GENERIC_SMMU;
1929 break;
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00001930 case ACPI_IORT_SMMU_V2:
1931 smmu->version = ARM_SMMU_V2;
1932 smmu->model = GENERIC_SMMU;
1933 break;
1934 case ACPI_IORT_SMMU_CORELINK_MMU500:
1935 smmu->version = ARM_SMMU_V2;
1936 smmu->model = ARM_MMU500;
1937 break;
Robin Murphy84c24372017-06-19 16:41:56 +01001938 case ACPI_IORT_SMMU_CAVIUM_THUNDERX:
1939 smmu->version = ARM_SMMU_V2;
1940 smmu->model = CAVIUM_SMMUV2;
1941 break;
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00001942 default:
1943 ret = -ENODEV;
1944 }
1945
1946 return ret;
1947}
1948
1949static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
1950 struct arm_smmu_device *smmu)
1951{
1952 struct device *dev = smmu->dev;
1953 struct acpi_iort_node *node =
1954 *(struct acpi_iort_node **)dev_get_platdata(dev);
1955 struct acpi_iort_smmu *iort_smmu;
1956 int ret;
1957
1958 /* Retrieve SMMU1/2 specific data */
1959 iort_smmu = (struct acpi_iort_smmu *)node->node_data;
1960
1961 ret = acpi_smmu_get_data(iort_smmu->model, smmu);
1962 if (ret < 0)
1963 return ret;
1964
1965 /* Ignore the configuration access interrupt */
1966 smmu->num_global_irqs = 1;
1967
1968 if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
1969 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
1970
1971 return 0;
1972}
1973#else
1974static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
1975 struct arm_smmu_device *smmu)
1976{
1977 return -ENODEV;
1978}
1979#endif
1980
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001981static int arm_smmu_device_dt_probe(struct platform_device *pdev,
1982 struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001983{
Robin Murphy67b65a32016-04-13 18:12:57 +01001984 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001985 struct device *dev = &pdev->dev;
Robin Murphy021bb842016-09-14 15:26:46 +01001986 bool legacy_binding;
1987
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001988 if (of_property_read_u32(dev->of_node, "#global-interrupts",
1989 &smmu->num_global_irqs)) {
1990 dev_err(dev, "missing #global-interrupts property\n");
1991 return -ENODEV;
1992 }
1993
1994 data = of_device_get_match_data(dev);
1995 smmu->version = data->version;
1996 smmu->model = data->model;
1997
Robin Murphy021bb842016-09-14 15:26:46 +01001998 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
1999 if (legacy_binding && !using_generic_binding) {
2000 if (!using_legacy_binding)
2001 pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
2002 using_legacy_binding = true;
2003 } else if (!legacy_binding && !using_legacy_binding) {
2004 using_generic_binding = true;
2005 } else {
2006 dev_err(dev, "not probing due to mismatched DT properties\n");
2007 return -ENODEV;
2008 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002009
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002010 if (of_dma_is_coherent(dev->of_node))
2011 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
2012
2013 return 0;
2014}
2015
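/*
 * Hook arm_smmu_ops into each bus type we can serve, unless another
 * IOMMU driver has already claimed it.
 */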
Robin Murphyf6810c12017-04-10 16:51:05 +05302016static void arm_smmu_bus_init(void)
2017{
2018 /* Oh, for a proper bus abstraction */
2019 if (!iommu_present(&platform_bus_type))
2020 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
2021#ifdef CONFIG_ARM_AMBA
2022 if (!iommu_present(&amba_bustype))
2023 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
2024#endif
2025#ifdef CONFIG_PCI
2026 if (!iommu_present(&pci_bus_type)) {
2027 pci_request_acs();
2028 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
2029 }
2030#endif
Nipun Guptaeab03e22018-09-10 19:19:18 +05302031#ifdef CONFIG_FSL_MC_BUS
2032 if (!iommu_present(&fsl_mc_bus_type))
2033 bus_set_iommu(&fsl_mc_bus_type, &arm_smmu_ops);
2034#endif
Robin Murphyf6810c12017-04-10 16:51:05 +05302035}
2036
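/*
 * Platform driver probe: parse the DT or ACPI description, map the
 * register space, claim clocks and interrupts, probe the hardware
 * configuration, register with the IOMMU core, then reset the SMMU and
 * install the bus ops where appropriate.
 */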
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002037static int arm_smmu_device_probe(struct platform_device *pdev)
2038{
2039 struct resource *res;
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002040 resource_size_t ioaddr;
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002041 struct arm_smmu_device *smmu;
2042 struct device *dev = &pdev->dev;
2043 int num_irqs, i, err;
2044
Will Deacon45ae7cf2013-06-24 18:31:25 +01002045 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
2046 if (!smmu) {
2047 dev_err(dev, "failed to allocate arm_smmu_device\n");
2048 return -ENOMEM;
2049 }
2050 smmu->dev = dev;
2051
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002052 if (dev->of_node)
2053 err = arm_smmu_device_dt_probe(pdev, smmu);
2054 else
2055 err = arm_smmu_device_acpi_probe(pdev, smmu);
2056
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002057 if (err)
2058 return err;
Robin Murphy09360402014-08-28 17:51:59 +01002059
Robin Murphyfc058d32019-08-15 19:37:33 +01002060 smmu = arm_smmu_impl_init(smmu);
2061 if (IS_ERR(smmu))
2062 return PTR_ERR(smmu);
2063
Will Deacon45ae7cf2013-06-24 18:31:25 +01002064	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Julia Lawall8a7f4312013-08-19 12:20:37 +01002066	smmu->base = devm_ioremap_resource(dev, res);
2067	if (IS_ERR(smmu->base))
2068		return PTR_ERR(smmu->base);
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002065	ioaddr = res->start;
Robin Murphy490325e2019-08-15 19:37:26 +01002069 /*
2070 * The resource size should effectively match the value of SMMU_TOP;
2071 * stash that temporarily until we know PAGESIZE to validate it with.
2072 */
2073 smmu->numpage = resource_size(res);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002074
Will Deacon45ae7cf2013-06-24 18:31:25 +01002075 num_irqs = 0;
2076 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
2077 num_irqs++;
2078 if (num_irqs > smmu->num_global_irqs)
2079 smmu->num_context_irqs++;
2080 }
2081
Andreas Herrmann44a08de2013-10-01 13:39:07 +01002082 if (!smmu->num_context_irqs) {
2083 dev_err(dev, "found %d interrupts but expected at least %d\n",
2084 num_irqs, smmu->num_global_irqs + 1);
2085 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002086 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002087
Kees Cooka86854d2018-06-12 14:07:58 -07002088 smmu->irqs = devm_kcalloc(dev, num_irqs, sizeof(*smmu->irqs),
Will Deacon45ae7cf2013-06-24 18:31:25 +01002089 GFP_KERNEL);
2090 if (!smmu->irqs) {
2091 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
2092 return -ENOMEM;
2093 }
2094
2095 for (i = 0; i < num_irqs; ++i) {
2096 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07002097
Will Deacon45ae7cf2013-06-24 18:31:25 +01002098 if (irq < 0) {
2099 dev_err(dev, "failed to get irq index %d\n", i);
2100 return -ENODEV;
2101 }
2102 smmu->irqs[i] = irq;
2103 }
2104
Sricharan R96a299d2018-12-04 11:52:09 +05302105 err = devm_clk_bulk_get_all(dev, &smmu->clks);
2106 if (err < 0) {
2107 dev_err(dev, "failed to get clocks %d\n", err);
2108 return err;
2109 }
2110 smmu->num_clks = err;
2111
2112 err = clk_bulk_prepare_enable(smmu->num_clks, smmu->clks);
2113 if (err)
2114 return err;
2115
Olav Haugan3c8766d2014-08-22 17:12:32 -07002116 err = arm_smmu_device_cfg_probe(smmu);
2117 if (err)
2118 return err;
2119
Vivek Gautamd1e20222018-07-19 23:23:56 +05302120 if (smmu->version == ARM_SMMU_V2) {
2121 if (smmu->num_context_banks > smmu->num_context_irqs) {
2122 dev_err(dev,
2123 "found only %d context irq(s) but %d required\n",
2124 smmu->num_context_irqs, smmu->num_context_banks);
2125 return -ENODEV;
2126 }
2127
2128 /* Ignore superfluous interrupts */
2129 smmu->num_context_irqs = smmu->num_context_banks;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002130 }
2131
Will Deacon45ae7cf2013-06-24 18:31:25 +01002132 for (i = 0; i < smmu->num_global_irqs; ++i) {
Peng Fanbee14002016-07-04 17:38:22 +08002133 err = devm_request_irq(smmu->dev, smmu->irqs[i],
2134 arm_smmu_global_fault,
2135 IRQF_SHARED,
2136 "arm-smmu global fault",
2137 smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002138 if (err) {
2139 dev_err(dev, "failed to request global IRQ %d (%u)\n",
2140 i, smmu->irqs[i]);
Robin Murphyf80cd882016-09-14 15:21:39 +01002141 return err;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002142 }
2143 }
2144
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002145 err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
2146 "smmu.%pa", &ioaddr);
2147 if (err) {
2148 dev_err(dev, "Failed to register iommu in sysfs\n");
2149 return err;
2150 }
2151
2152 iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
2153 iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
2154
2155 err = iommu_device_register(&smmu->iommu);
2156 if (err) {
2157 dev_err(dev, "Failed to register iommu\n");
2158 return err;
2159 }
2160
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002161 platform_set_drvdata(pdev, smmu);
Will Deaconfd90cec2013-08-21 13:56:34 +01002162 arm_smmu_device_reset(smmu);
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03002163 arm_smmu_test_smr_masks(smmu);
Robin Murphy021bb842016-09-14 15:26:46 +01002164
Robin Murphyf6810c12017-04-10 16:51:05 +05302165 /*
Sricharan Rd4a44f02018-12-04 11:52:10 +05302166 * We want to avoid touching dev->power.lock in fastpaths unless
2167 * it's really going to do something useful - pm_runtime_enabled()
2168 * can serve as an ideal proxy for that decision. So, conditionally
2169 * enable pm_runtime.
2170 */
2171 if (dev->pm_domain) {
2172 pm_runtime_set_active(dev);
2173 pm_runtime_enable(dev);
2174 }
2175
2176 /*
Robin Murphyf6810c12017-04-10 16:51:05 +05302177 * For ACPI and generic DT bindings, an SMMU will be probed before
2178 * any device which might need it, so we want the bus ops in place
2179 * ready to handle default domain setup as soon as any SMMU exists.
2180 */
2181 if (!using_legacy_binding)
2182 arm_smmu_bus_init();
2183
Will Deacon45ae7cf2013-06-24 18:31:25 +01002184 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002185}
2186
Robin Murphyf6810c12017-04-10 16:51:05 +05302187/*
2188 * With the legacy DT binding in play, though, we have no guarantees about
2189 * probe order, but then we're also not doing default domains, so we can
2190 * delay setting bus ops until we're sure every possible SMMU is ready,
2191 * and that way ensure that no add_device() calls get missed.
2192 */
2193static int arm_smmu_legacy_bus_init(void)
2194{
2195 if (using_legacy_binding)
2196 arm_smmu_bus_init();
2197 return 0;
2198}
2199device_initcall_sync(arm_smmu_legacy_bus_init);
2200
Paul Gortmakeraddb672f2018-12-01 14:19:16 -05002201static void arm_smmu_device_shutdown(struct platform_device *pdev)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002202{
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002203 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002204
2205 if (!smmu)
Paul Gortmakeraddb672f2018-12-01 14:19:16 -05002206 return;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002207
Will Deaconecfadb62013-07-31 19:21:28 +01002208 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002209 dev_err(&pdev->dev, "removing device with active domains!\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01002210
Sricharan Rd4a44f02018-12-04 11:52:10 +05302211 arm_smmu_rpm_get(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002212 /* Turn the thing off */
Robin Murphy00320ce2019-08-15 19:37:31 +01002213 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, sCR0_CLIENTPD);
Sricharan Rd4a44f02018-12-04 11:52:10 +05302214 arm_smmu_rpm_put(smmu);
Sricharan R96a299d2018-12-04 11:52:09 +05302215
Sricharan Rd4a44f02018-12-04 11:52:10 +05302216 if (pm_runtime_enabled(smmu->dev))
2217 pm_runtime_force_suspend(smmu->dev);
2218 else
2219 clk_bulk_disable(smmu->num_clks, smmu->clks);
2220
2221 clk_bulk_unprepare(smmu->num_clks, smmu->clks);
Nate Watterson7aa86192017-06-29 18:18:15 -04002222}
2223
Sricharan R96a299d2018-12-04 11:52:09 +05302224static int __maybe_unused arm_smmu_runtime_resume(struct device *dev)
Robin Murphya2d866f2017-08-08 14:56:15 +01002225{
2226 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
Sricharan R96a299d2018-12-04 11:52:09 +05302227 int ret;
2228
2229 ret = clk_bulk_enable(smmu->num_clks, smmu->clks);
2230 if (ret)
2231 return ret;
Robin Murphya2d866f2017-08-08 14:56:15 +01002232
2233 arm_smmu_device_reset(smmu);
Sricharan R96a299d2018-12-04 11:52:09 +05302234
Will Deacon45ae7cf2013-06-24 18:31:25 +01002235 return 0;
2236}
2237
Sricharan R96a299d2018-12-04 11:52:09 +05302238static int __maybe_unused arm_smmu_runtime_suspend(struct device *dev)
Dan Carpenter6614ee72013-08-21 09:34:20 +01002239{
Sricharan R96a299d2018-12-04 11:52:09 +05302240 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
2241
2242 clk_bulk_disable(smmu->num_clks, smmu->clks);
2243
2244 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002245}
2246
Robin Murphya2d866f2017-08-08 14:56:15 +01002247static int __maybe_unused arm_smmu_pm_resume(struct device *dev)
2248{
Sricharan R96a299d2018-12-04 11:52:09 +05302249 if (pm_runtime_suspended(dev))
2250 return 0;
Robin Murphya2d866f2017-08-08 14:56:15 +01002251
Sricharan R96a299d2018-12-04 11:52:09 +05302252 return arm_smmu_runtime_resume(dev);
Robin Murphya2d866f2017-08-08 14:56:15 +01002253}
2254
Sricharan R96a299d2018-12-04 11:52:09 +05302255static int __maybe_unused arm_smmu_pm_suspend(struct device *dev)
2256{
2257 if (pm_runtime_suspended(dev))
2258 return 0;
2259
2260 return arm_smmu_runtime_suspend(dev);
2261}
2262
2263static const struct dev_pm_ops arm_smmu_pm_ops = {
2264 SET_SYSTEM_SLEEP_PM_OPS(arm_smmu_pm_suspend, arm_smmu_pm_resume)
2265 SET_RUNTIME_PM_OPS(arm_smmu_runtime_suspend,
2266 arm_smmu_runtime_resume, NULL)
2267};
Robin Murphya2d866f2017-08-08 14:56:15 +01002268
Will Deacon45ae7cf2013-06-24 18:31:25 +01002269static struct platform_driver arm_smmu_driver = {
2270 .driver = {
Paul Gortmakeraddb672f2018-12-01 14:19:16 -05002271 .name = "arm-smmu",
2272 .of_match_table = of_match_ptr(arm_smmu_of_match),
2273 .pm = &arm_smmu_pm_ops,
2274 .suppress_bind_attrs = true,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002275 },
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002276 .probe = arm_smmu_device_probe,
Nate Watterson7aa86192017-06-29 18:18:15 -04002277 .shutdown = arm_smmu_device_shutdown,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002278};
Paul Gortmakeraddb672f2018-12-01 14:19:16 -05002279builtin_platform_driver(arm_smmu_driver);