// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 *	- Extended Stream ID (16 bit)
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include <linux/amba/bus.h>
#include <linux/fsl/mc.h>

#include "arm-smmu.h"

/*
 * Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU
 * global register space are still, in fact, using a hypervisor to mediate it
 * by trapping and emulating register accesses. Sadly, some deployed versions
 * of said trapping code have bugs wherein they go horribly wrong for stores
 * using r31 (i.e. XZR/WZR) as the source register.
 */
#define QCOM_DUMMY_VAL -1

#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */
#define TLB_SPIN_COUNT			10

#define MSI_IOVA_BASE			0x8000000
#define MSI_IOVA_LENGTH			0x100000

static int force_stage;
/*
 * not really modular, but the easiest way to keep compat with existing
 * bootargs behaviour is to continue using module_param() here.
 */
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass =
	IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT);
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

struct arm_smmu_s2cr {
	struct iommu_group		*group;
	int				count;
	enum arm_smmu_s2cr_type		type;
	enum arm_smmu_s2cr_privcfg	privcfg;
	u8				cbndx;
};

#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
}

struct arm_smmu_smr {
	u16				mask;
	u16				id;
	bool				valid;
};

struct arm_smmu_cb {
	u64				ttbr[2];
	u32				tcr[2];
	u32				mair[2];
	struct arm_smmu_cfg		*cfg;
};

struct arm_smmu_master_cfg {
	struct arm_smmu_device		*smmu;
	s16				smendx[];
};
#define INVALID_SMENDX			-1
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
#define fwspec_smendx(fw, i) \
	(i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
#define for_each_cfg_sme(fw, i, idx) \
	for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)

static bool using_legacy_binding, using_generic_binding;

static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
{
	if (pm_runtime_enabled(smmu->dev))
		return pm_runtime_get_sync(smmu->dev);

	return 0;
}

static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
{
	if (pm_runtime_enabled(smmu->dev))
		pm_runtime_put(smmu->dev);
}

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}

static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", 0)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}

static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;

static int arm_smmu_register_legacy_master(struct device *dev,
					   struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err;

	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}

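/*
 * Allocate a free index from @map between @start and @end, retrying if
 * another caller claims the same bit first.
 */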
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
				int sync, int status)
{
	unsigned int spin_cnt, delay;
	u32 reg;

	arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL);
	for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
		for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
			reg = arm_smmu_readl(smmu, page, status);
			if (!(reg & sTLBGSTATUS_GSACTIVE))
				return;
			cpu_relax();
		}
		udelay(delay);
	}
	dev_err_ratelimited(smmu->dev,
			    "TLB sync timed out -- SMMU may be deadlocked\n");
}

static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
{
	unsigned long flags;

	spin_lock_irqsave(&smmu->global_sync_lock, flags);
	__arm_smmu_tlb_sync(smmu, ARM_SMMU_GR0, ARM_SMMU_GR0_sTLBGSYNC,
			    ARM_SMMU_GR0_sTLBGSTATUS);
	spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
}

static void arm_smmu_tlb_sync_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	unsigned long flags;

	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
	__arm_smmu_tlb_sync(smmu, ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx),
			    ARM_SMMU_CB_TLBSYNC, ARM_SMMU_CB_TLBSTATUS);
	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
}

static void arm_smmu_tlb_sync_vmid(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;

	arm_smmu_tlb_sync_global(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context_s1(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	/*
	 * The TLBI write may be relaxed, so ensure that PTEs cleared by the
	 * current CPU are visible beforehand.
	 */
	wmb();
	arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx,
			  ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid);
	arm_smmu_tlb_sync_context(cookie);
}

static void arm_smmu_tlb_inv_context_s2(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	/* See above */
	wmb();
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
	arm_smmu_tlb_sync_global(smmu);
}

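/*
 * Stage 1 invalidation by VA: issue one TLBIVA(L) write per granule, tagged
 * with the context's ASID; AArch64 contexts use the 64-bit register format.
 */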
static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size,
				      size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	int reg, idx = cfg->cbndx;

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	reg = leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

	if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
		iova = (iova >> 12) << 12;
		iova |= cfg->asid;
		do {
			arm_smmu_cb_write(smmu, idx, reg, iova);
			iova += granule;
		} while (size -= granule);
	} else {
		iova >>= 12;
		iova |= (u64)cfg->asid << 48;
		do {
			arm_smmu_cb_writeq(smmu, idx, reg, iova);
			iova += granule >> 12;
		} while (size -= granule);
	}
}

static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
				      size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	int reg, idx = smmu_domain->cfg.cbndx;

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	reg = leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L : ARM_SMMU_CB_S2_TLBIIPAS2;
	iova >>= 12;
	do {
		if (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64)
			arm_smmu_cb_writeq(smmu, idx, reg, iova);
		else
			arm_smmu_cb_write(smmu, idx, reg, iova);
		iova += granule >> 12;
	} while (size -= granule);
}

/*
 * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
 * almost negligible, but the benefit of getting the first one in as far ahead
 * of the sync as possible is significant, hence we don't just make this a
 * no-op and set .tlb_sync to arm_smmu_tlb_inv_context_s2() as you might think.
 */
static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
					 size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
}

static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
				  size_t granule, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;

	ops->tlb_inv_range(iova, size, granule, false, cookie);
	ops->tlb_sync(cookie);
}

static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size,
				  size_t granule, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;

	ops->tlb_inv_range(iova, size, granule, true, cookie);
	ops->tlb_sync(cookie);
}

static void arm_smmu_tlb_add_page(struct iommu_iotlb_gather *gather,
				  unsigned long iova, size_t granule,
				  void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;

	ops->tlb_inv_range(iova, granule, granule, true, cookie);
}

static const struct arm_smmu_flush_ops arm_smmu_s1_tlb_ops = {
	.tlb = {
		.tlb_flush_all	= arm_smmu_tlb_inv_context_s1,
		.tlb_flush_walk	= arm_smmu_tlb_inv_walk,
		.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf,
		.tlb_add_page	= arm_smmu_tlb_add_page,
	},
	.tlb_inv_range		= arm_smmu_tlb_inv_range_s1,
	.tlb_sync		= arm_smmu_tlb_sync_context,
};

static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
	.tlb = {
		.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
		.tlb_flush_walk	= arm_smmu_tlb_inv_walk,
		.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf,
		.tlb_add_page	= arm_smmu_tlb_add_page,
	},
	.tlb_inv_range		= arm_smmu_tlb_inv_range_s2,
	.tlb_sync		= arm_smmu_tlb_sync_context,
};

static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
	.tlb = {
		.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
		.tlb_flush_walk	= arm_smmu_tlb_inv_walk,
		.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf,
		.tlb_add_page	= arm_smmu_tlb_add_page,
	},
	.tlb_inv_range		= arm_smmu_tlb_inv_vmid_nosync,
	.tlb_sync		= arm_smmu_tlb_sync_vmid,
};

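/*
 * Per-context fault handler: report the faulting address and syndrome
 * registers, then clear the fault status so that new faults can be recorded.
 */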
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	u32 fsr, fsynr, cbfrsynra;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	int idx = smmu_domain->cfg.cbndx;

	fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0);
	iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);
	cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));

	dev_err_ratelimited(smmu->dev,
	"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
			    fsr, iova, fsynr, cbfrsynra, idx);

	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;

	gfsr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
	gfsynr0 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, gfsr);
	return IRQ_HANDLED;
}

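/*
 * Capture the io-pgtable configuration in the software context-bank state;
 * the registers themselves are programmed by arm_smmu_write_context_bank().
 */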
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	cb->cfg = cfg;

	/* TCR */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr;
		} else {
			cb->tcr[0] = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
			cb->tcr[1] = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			cb->tcr[1] |= FIELD_PREP(TCR2_SEP, TCR2_SEP_UPSTREAM);
			if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
				cb->tcr[1] |= TCR2_AS;
		}
	} else {
		cb->tcr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	}

	/* TTBRs */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
			cb->ttbr[1] = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
		} else {
			cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
			cb->ttbr[0] |= FIELD_PREP(TTBRn_ASID, cfg->asid);
			cb->ttbr[1] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
			cb->ttbr[1] |= FIELD_PREP(TTBRn_ASID, cfg->asid);
		}
	} else {
		cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr;
			cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr;
		} else {
			cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
			cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		}
	}
}

static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
{
	u32 reg;
	bool stage1;
	struct arm_smmu_cb *cb = &smmu->cbs[idx];
	struct arm_smmu_cfg *cfg = cb->cfg;

	/* Unassigned context banks only need disabling */
	if (!cfg) {
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, 0);
		return;
	}

	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	/* CBA2R */
	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_VA64;
		else
			reg = 0;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= FIELD_PREP(CBA2R_VMID16, cfg->vmid);

		arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBA2R(idx), reg);
	}

	/* CBAR */
	reg = FIELD_PREP(CBAR_TYPE, cfg->cbar);
	if (smmu->version < ARM_SMMU_V2)
		reg |= FIELD_PREP(CBAR_IRPTNDX, cfg->irptndx);

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= FIELD_PREP(CBAR_S1_BPSHCFG, CBAR_S1_BPSHCFG_NSH) |
			FIELD_PREP(CBAR_S1_MEMATTR, CBAR_S1_MEMATTR_WB);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= FIELD_PREP(CBAR_VMID, cfg->vmid);
	}
	arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(idx), reg);

	/*
	 * TCR
	 * We must write this before the TTBRs, since it determines the
	 * access behaviour of some fields (in particular, ASID[15:8]).
	 */
	if (stage1 && smmu->version > ARM_SMMU_V1)
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR2, cb->tcr[1]);
	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR, cb->tcr[0]);

	/* TTBRs */
	if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_CONTEXTIDR, cfg->asid);
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR1, cb->ttbr[1]);
	} else {
		arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
		if (stage1)
			arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR1,
					   cb->ttbr[1]);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR0, cb->mair[0]);
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR1, cb->mair[1]);
	}

	/* SCTLR */
	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= SCTLR_E;

	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
}

static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
		smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N              S1
	 *     S1             S1+S2            S1
	 *     S1               S2             S2
	 *     S1               S1             S1
	 *     N                N              N
	 *     N              S1+S2            S2
	 *     N                S2             S2
	 *     N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
	    !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
	    (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		} else {
			fmt = ARM_V7S;
			ias = min(ias, 32UL);
			oas = min(oas, 32UL);
		}
		smmu_domain->flush_ops = &arm_smmu_s1_tlb_ops;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		if (smmu->version == ARM_SMMU_V2)
			smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v2;
		else
			smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v1;
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}
	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (ret < 0)
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
		cfg->vmid = cfg->cbndx + 1;
	else
		cfg->asid = cfg->cbndx;

	smmu_domain->smmu = smmu;
	if (smmu->impl && smmu->impl->init_context) {
		ret = smmu->impl->init_context(smmu_domain);
		if (ret)
			goto out_unlock;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.coherent_walk	= smmu->features & ARM_SMMU_FEAT_COHERENT_WALK,
		.tlb		= &smmu_domain->flush_ops->tlb,
		.iommu_dev	= smmu->dev,
	};

	if (smmu_domain->non_strict)
		pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;

	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1UL << ias) - 1;
	domain->geometry.force_aperture = true;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
			       IRQF_SHARED, "arm-smmu-context-fault", domain);
	if (ret < 0) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	int ret, irq;

	if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
		return;

	ret = arm_smmu_rpm_get(smmu);
	if (ret < 0)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	smmu->cbs[cfg->cbndx].cfg = NULL;
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);

	arm_smmu_rpm_put(smmu);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED &&
	    type != IOMMU_DOMAIN_DMA &&
	    type != IOMMU_DOMAIN_IDENTITY)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
	    iommu_get_dma_cookie(&smmu_domain->domain))) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->cb_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_smr *smr = smmu->smrs + idx;
	u32 reg = FIELD_PREP(SMR_ID, smr->id) | FIELD_PREP(SMR_MASK, smr->mask);

	if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
		reg |= SMR_VALID;
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(idx), reg);
}

static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
	u32 reg = FIELD_PREP(S2CR_TYPE, s2cr->type) |
		  FIELD_PREP(S2CR_CBNDX, s2cr->cbndx) |
		  FIELD_PREP(S2CR_PRIVCFG, s2cr->privcfg);

	if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
	    smmu->smrs[idx].valid)
		reg |= S2CR_EXIDVALID;
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
}

static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
{
	arm_smmu_write_s2cr(smmu, idx);
	if (smmu->smrs)
		arm_smmu_write_smr(smmu, idx);
}

/*
 * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
 * should be called after sCR0 is written.
 */
static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
{
	u32 smr;

	if (!smmu->smrs)
		return;

	/*
	 * SMR.ID bits may not be preserved if the corresponding MASK
	 * bits are set, so check each one separately. We can reject
	 * masters later if they try to claim IDs outside these masks.
	 */
	smr = FIELD_PREP(SMR_ID, smmu->streamid_mask);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(0), smr);
	smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(0));
	smmu->streamid_mask = FIELD_GET(SMR_ID, smr);

	smr = FIELD_PREP(SMR_MASK, smmu->streamid_mask);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(0), smr);
	smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(0));
	smmu->smr_mask_mask = FIELD_GET(SMR_MASK, smr);
}

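/*
 * Find a stream mapping entry compatible with the given ID/mask, or a free
 * one to claim; overlapping but incompatible entries are rejected.
 */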
static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
{
	struct arm_smmu_smr *smrs = smmu->smrs;
	int i, free_idx = -ENOSPC;

	/* Stream indexing is blissfully easy */
	if (!smrs)
		return id;

	/* Validating SMRs is... less so */
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		if (!smrs[i].valid) {
			/*
			 * Note the first free entry we come across, which
			 * we'll claim in the end if nothing else matches.
			 */
			if (free_idx < 0)
				free_idx = i;
			continue;
		}
		/*
		 * If the new entry is _entirely_ matched by an existing entry,
		 * then reuse that, with the guarantee that there also cannot
		 * be any subsequent conflicting entries. In normal use we'd
		 * expect simply identical entries for this case, but there's
		 * no harm in accommodating the generalisation.
		 */
		if ((mask & smrs[i].mask) == mask &&
		    !((id ^ smrs[i].id) & ~smrs[i].mask))
			return i;
		/*
		 * If the new entry has any other overlap with an existing one,
		 * though, then there always exists at least one stream ID
		 * which would cause a conflict, and we can't allow that risk.
		 */
		if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
			return -EINVAL;
	}

	return free_idx;
}

static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
{
	if (--smmu->s2crs[idx].count)
		return false;

	smmu->s2crs[idx] = s2cr_init_val;
	if (smmu->smrs)
		smmu->smrs[idx].valid = false;

	return true;
}

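/*
 * Allocate stream mapping entries for each of the master's stream IDs and,
 * once they have all been claimed, program the SMRs/S2CRs in hardware.
 */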
static int arm_smmu_master_alloc_smes(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
	struct arm_smmu_device *smmu = cfg->smmu;
	struct arm_smmu_smr *smrs = smmu->smrs;
	struct iommu_group *group;
	int i, idx, ret;

	mutex_lock(&smmu->stream_map_mutex);
	/* Figure out a viable stream map entry allocation */
	for_each_cfg_sme(fwspec, i, idx) {
		u16 sid = FIELD_GET(SMR_ID, fwspec->ids[i]);
		u16 mask = FIELD_GET(SMR_MASK, fwspec->ids[i]);

		if (idx != INVALID_SMENDX) {
			ret = -EEXIST;
			goto out_err;
		}

		ret = arm_smmu_find_sme(smmu, sid, mask);
		if (ret < 0)
			goto out_err;

		idx = ret;
		if (smrs && smmu->s2crs[idx].count == 0) {
			smrs[idx].id = sid;
			smrs[idx].mask = mask;
			smrs[idx].valid = true;
		}
		smmu->s2crs[idx].count++;
		cfg->smendx[i] = (s16)idx;
	}

	group = iommu_group_get_for_dev(dev);
	if (!group)
		group = ERR_PTR(-ENOMEM);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto out_err;
	}
	iommu_group_put(group);

	/* It worked! Now, poke the actual hardware */
	for_each_cfg_sme(fwspec, i, idx) {
		arm_smmu_write_sme(smmu, idx);
		smmu->s2crs[idx].group = group;
	}

	mutex_unlock(&smmu->stream_map_mutex);
	return 0;

out_err:
	while (i--) {
		arm_smmu_free_sme(smmu, cfg->smendx[i]);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
	return ret;
}

static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
{
	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
	int i, idx;

	mutex_lock(&smmu->stream_map_mutex);
	for_each_cfg_sme(fwspec, i, idx) {
		if (arm_smmu_free_sme(smmu, idx))
			arm_smmu_write_sme(smmu, idx);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
}

static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct iommu_fwspec *fwspec)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s2cr *s2cr = smmu->s2crs;
	u8 cbndx = smmu_domain->cfg.cbndx;
	enum arm_smmu_s2cr_type type;
	int i, idx;

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
		type = S2CR_TYPE_BYPASS;
	else
		type = S2CR_TYPE_TRANS;

	for_each_cfg_sme(fwspec, i, idx) {
		if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
			continue;

		s2cr[idx].type = type;
		s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
		s2cr[idx].cbndx = cbndx;
		arm_smmu_write_s2cr(smmu, idx);
	}
	return 0;
}

static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct arm_smmu_device *smmu;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (!fwspec || fwspec->ops != &arm_smmu_ops) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/*
	 * FIXME: The arch/arm DMA API code tries to attach devices to its own
	 * domains between of_xlate() and add_device() - we have no way to cope
	 * with that, so until ARM gets converted to rely on groups and default
	 * domains, just say no (but more politely than by dereferencing NULL).
	 * This should be at least a WARN_ON once that's sorted.
	 */
	if (!fwspec->iommu_priv)
		return -ENODEV;

	smmu = fwspec_smmu(fwspec);

	ret = arm_smmu_rpm_get(smmu);
	if (ret < 0)
		return ret;

	/* Ensure that the domain is finalised */
	ret = arm_smmu_init_domain_context(domain, smmu);
	if (ret < 0)
		goto rpm_put;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different SMMUs.
	 */
	if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
		ret = -EINVAL;
		goto rpm_put;
	}

	/* Looks ok, so add the device to the domain */
	ret = arm_smmu_domain_add_master(smmu_domain, fwspec);

rpm_put:
	arm_smmu_rpm_put(smmu);
	return ret;
}

static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
	struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
	int ret;

	if (!ops)
		return -ENODEV;

	arm_smmu_rpm_get(smmu);
	ret = ops->map(ops, iova, paddr, size, prot);
	arm_smmu_rpm_put(smmu);

	return ret;
}

static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size, struct iommu_iotlb_gather *gather)
{
	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
	struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
	size_t ret;

	if (!ops)
		return 0;

	arm_smmu_rpm_get(smmu);
	ret = ops->unmap(ops, iova, size, gather);
	arm_smmu_rpm_put(smmu);
1191
1192 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001193}
1194
Robin Murphy44f68762018-09-20 17:10:27 +01001195static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
1196{
1197 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301198 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy44f68762018-09-20 17:10:27 +01001199
Will Deaconabfd6fe2019-07-02 16:44:41 +01001200 if (smmu_domain->flush_ops) {
Sricharan Rd4a44f02018-12-04 11:52:10 +05301201 arm_smmu_rpm_get(smmu);
Will Deaconabfd6fe2019-07-02 16:44:41 +01001202 smmu_domain->flush_ops->tlb.tlb_flush_all(smmu_domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301203 arm_smmu_rpm_put(smmu);
1204 }
Robin Murphy44f68762018-09-20 17:10:27 +01001205}
1206
Will Deacon56f8af52019-07-02 16:44:06 +01001207static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
1208 struct iommu_iotlb_gather *gather)
Robin Murphy32b12442017-09-28 15:55:01 +01001209{
1210 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301211 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy32b12442017-09-28 15:55:01 +01001212
Will Deaconabfd6fe2019-07-02 16:44:41 +01001213 if (smmu_domain->flush_ops) {
Sricharan Rd4a44f02018-12-04 11:52:10 +05301214 arm_smmu_rpm_get(smmu);
Will Deacone953f7f2019-07-02 16:44:50 +01001215 smmu_domain->flush_ops->tlb_sync(smmu_domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301216 arm_smmu_rpm_put(smmu);
1217 }
Robin Murphy32b12442017-09-28 15:55:01 +01001218}
1219
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001220static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
1221 dma_addr_t iova)
1222{
Joerg Roedel1d672632015-03-26 13:43:10 +01001223 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001224 struct arm_smmu_device *smmu = smmu_domain->smmu;
1225 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1226	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1227 struct device *dev = smmu->dev;
Robin Murphy19713fd2019-08-15 19:37:30 +01001228 void __iomem *reg;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001229 u32 tmp;
1230 u64 phys;
Robin Murphy523d7422017-06-22 16:53:56 +01001231 unsigned long va, flags;
Robin Murphy19713fd2019-08-15 19:37:30 +01001232 int ret, idx = cfg->cbndx;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301233
1234 ret = arm_smmu_rpm_get(smmu);
1235 if (ret < 0)
1236 return 0;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001237
Robin Murphy523d7422017-06-22 16:53:56 +01001238 spin_lock_irqsave(&smmu_domain->cb_lock, flags);
Robin Murphy661d9622015-05-27 17:09:34 +01001239 va = iova & ~0xfffUL;
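	/*
	 * Ask the hardware to translate the page-aligned VA for us: writing
	 * ATS1PR starts a stage 1 privileged-read translation, ATSR.ACTIVE
	 * clears once it completes, and the result (or a fault) lands in PAR.
	 */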
Robin Murphy61005762019-08-15 19:37:28 +01001240 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
Robin Murphy19713fd2019-08-15 19:37:30 +01001241 arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
Robin Murphy61005762019-08-15 19:37:28 +01001242 else
Robin Murphy19713fd2019-08-15 19:37:30 +01001243 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001244
Robin Murphy19713fd2019-08-15 19:37:30 +01001245 reg = arm_smmu_page(smmu, ARM_SMMU_CB(smmu, idx)) + ARM_SMMU_CB_ATSR;
1246 if (readl_poll_timeout_atomic(reg, tmp, !(tmp & ATSR_ACTIVE), 5, 50)) {
Robin Murphy523d7422017-06-22 16:53:56 +01001247 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001248 dev_err(dev,
Fabio Estevam077124c2015-08-18 17:12:24 +01001249 "iova to phys timed out on %pad. Falling back to software table walk.\n",
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001250 &iova);
1251 return ops->iova_to_phys(ops, iova);
1252 }
1253
Robin Murphy19713fd2019-08-15 19:37:30 +01001254 phys = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_PAR);
Robin Murphy523d7422017-06-22 16:53:56 +01001255 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001256 if (phys & CB_PAR_F) {
1257 dev_err(dev, "translation fault!\n");
1258 dev_err(dev, "PAR = 0x%llx\n", phys);
1259 return 0;
1260 }
1261
Sricharan Rd4a44f02018-12-04 11:52:10 +05301262 arm_smmu_rpm_put(smmu);
1263
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001264 return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
1265}
1266
Will Deacon45ae7cf2013-06-24 18:31:25 +01001267static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001268 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001269{
Joerg Roedel1d672632015-03-26 13:43:10 +01001270 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Robin Murphy523d7422017-06-22 16:53:56 +01001271 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001272
Sunil Gouthambdf95922017-04-25 15:27:52 +05301273 if (domain->type == IOMMU_DOMAIN_IDENTITY)
1274 return iova;
1275
Will Deacon518f7132014-11-14 17:17:54 +00001276 if (!ops)
Will Deacona44a97912013-11-07 18:47:50 +00001277 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001278
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001279 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
Robin Murphy523d7422017-06-22 16:53:56 +01001280 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
1281 return arm_smmu_iova_to_phys_hard(domain, iova);
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001282
Robin Murphy523d7422017-06-22 16:53:56 +01001283 return ops->iova_to_phys(ops, iova);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001284}
1285
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001286static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001287{
Will Deacond0948942014-06-24 17:30:10 +01001288 switch (cap) {
1289 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001290 /*
1291 * Return true here as the SMMU can always send out coherent
1292 * requests.
1293 */
1294 return true;
Antonios Motakis0029a8d2014-10-13 14:06:18 +01001295 case IOMMU_CAP_NOEXEC:
1296 return true;
Will Deacond0948942014-06-24 17:30:10 +01001297 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001298 return false;
Will Deacond0948942014-06-24 17:30:10 +01001299 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001300}
Will Deacon45ae7cf2013-06-24 18:31:25 +01001301
Suzuki K Poulose92ce7e82019-06-14 18:54:00 +01001302static int arm_smmu_match_node(struct device *dev, const void *data)
Robin Murphy021bb842016-09-14 15:26:46 +01001303{
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001304 return dev->fwnode == data;
Robin Murphy021bb842016-09-14 15:26:46 +01001305}
1306
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001307static
1308struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
Robin Murphy021bb842016-09-14 15:26:46 +01001309{
1310 struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001311 fwnode, arm_smmu_match_node);
Robin Murphy021bb842016-09-14 15:26:46 +01001312 put_device(dev);
1313 return dev ? dev_get_drvdata(dev) : NULL;
1314}
1315
Will Deacon03edb222015-01-19 14:27:33 +00001316static int arm_smmu_add_device(struct device *dev)
1317{
Robin Murphyadfec2e2016-09-12 17:13:55 +01001318 struct arm_smmu_device *smmu;
Robin Murphyf80cd882016-09-14 15:21:39 +01001319 struct arm_smmu_master_cfg *cfg;
Joerg Roedel9b468f72018-11-29 14:01:00 +01001320 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphyf80cd882016-09-14 15:21:39 +01001321 int i, ret;
1322
Robin Murphy021bb842016-09-14 15:26:46 +01001323 if (using_legacy_binding) {
1324 ret = arm_smmu_register_legacy_master(dev, &smmu);
Artem Savkova7990c62017-08-08 12:26:02 +02001325
1326 /*
1327		 * If dev->iommu_fwspec is initially NULL, arm_smmu_register_legacy_master()
1328 * will allocate/initialise a new one. Thus we need to update fwspec for
1329 * later use.
1330 */
Joerg Roedel9b468f72018-11-29 14:01:00 +01001331 fwspec = dev_iommu_fwspec_get(dev);
Robin Murphy021bb842016-09-14 15:26:46 +01001332 if (ret)
1333 goto out_free;
Robin Murphy3c117b52016-11-02 17:31:32 +00001334 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001335 smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
Robin Murphy021bb842016-09-14 15:26:46 +01001336 } else {
1337 return -ENODEV;
1338 }
Robin Murphyf80cd882016-09-14 15:21:39 +01001339
1340 ret = -EINVAL;
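	/*
	 * Each fwspec ID packs a StreamID (SMR_ID) and an optional SMR mask
	 * (SMR_MASK), as encoded by arm_smmu_of_xlate() or the IORT code;
	 * reject anything wider than this SMMU actually implements.
	 */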
Robin Murphyadfec2e2016-09-12 17:13:55 +01001341 for (i = 0; i < fwspec->num_ids; i++) {
Robin Murphy0caf5f42019-08-15 19:37:23 +01001342 u16 sid = FIELD_GET(SMR_ID, fwspec->ids[i]);
1343 u16 mask = FIELD_GET(SMR_MASK, fwspec->ids[i]);
Robin Murphyf80cd882016-09-14 15:21:39 +01001344
Robin Murphyadfec2e2016-09-12 17:13:55 +01001345 if (sid & ~smmu->streamid_mask) {
Robin Murphyf80cd882016-09-14 15:21:39 +01001346 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
Robin Murphy021bb842016-09-14 15:26:46 +01001347 sid, smmu->streamid_mask);
1348 goto out_free;
1349 }
1350 if (mask & ~smmu->smr_mask_mask) {
1351 dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
Peng Fan6323f472017-04-21 17:03:36 +08001352 mask, smmu->smr_mask_mask);
Robin Murphyf80cd882016-09-14 15:21:39 +01001353 goto out_free;
1354 }
Robin Murphyf80cd882016-09-14 15:21:39 +01001355 }
Will Deacon03edb222015-01-19 14:27:33 +00001356
Robin Murphyadfec2e2016-09-12 17:13:55 +01001357 ret = -ENOMEM;
1358 cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
1359 GFP_KERNEL);
1360 if (!cfg)
1361 goto out_free;
1362
1363 cfg->smmu = smmu;
1364 fwspec->iommu_priv = cfg;
1365 while (i--)
1366 cfg->smendx[i] = INVALID_SMENDX;
1367
Sricharan Rd4a44f02018-12-04 11:52:10 +05301368 ret = arm_smmu_rpm_get(smmu);
1369 if (ret < 0)
1370 goto out_cfg_free;
1371
Robin Murphy588888a2016-09-12 17:13:54 +01001372 ret = arm_smmu_master_alloc_smes(dev);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301373 arm_smmu_rpm_put(smmu);
1374
Robin Murphyadfec2e2016-09-12 17:13:55 +01001375 if (ret)
Vivek Gautamc54451a2017-07-06 15:07:00 +05301376 goto out_cfg_free;
Robin Murphyadfec2e2016-09-12 17:13:55 +01001377
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001378 iommu_device_link(&smmu->iommu, dev);
1379
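	/*
	 * Link the client device to the SMMU for runtime PM, so the SMMU is
	 * resumed whenever one of its masters is in use.
	 */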
Sricharan R655e3642018-12-04 11:52:11 +05301380 device_link_add(dev, smmu->dev,
1381 DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER);
1382
Robin Murphyadfec2e2016-09-12 17:13:55 +01001383 return 0;
Robin Murphyf80cd882016-09-14 15:21:39 +01001384
Vivek Gautamc54451a2017-07-06 15:07:00 +05301385out_cfg_free:
1386 kfree(cfg);
Robin Murphyf80cd882016-09-14 15:21:39 +01001387out_free:
Robin Murphyadfec2e2016-09-12 17:13:55 +01001388 iommu_fwspec_free(dev);
Robin Murphyf80cd882016-09-14 15:21:39 +01001389 return ret;
Will Deacon03edb222015-01-19 14:27:33 +00001390}
1391
Will Deacon45ae7cf2013-06-24 18:31:25 +01001392static void arm_smmu_remove_device(struct device *dev)
1393{
Joerg Roedel9b468f72018-11-29 14:01:00 +01001394 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001395 struct arm_smmu_master_cfg *cfg;
1396 struct arm_smmu_device *smmu;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301397 int ret;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001398
Robin Murphyadfec2e2016-09-12 17:13:55 +01001399 if (!fwspec || fwspec->ops != &arm_smmu_ops)
Robin Murphyf80cd882016-09-14 15:21:39 +01001400 return;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001401
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001402 cfg = fwspec->iommu_priv;
1403 smmu = cfg->smmu;
1404
Sricharan Rd4a44f02018-12-04 11:52:10 +05301405 ret = arm_smmu_rpm_get(smmu);
1406 if (ret < 0)
1407 return;
1408
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001409 iommu_device_unlink(&smmu->iommu, dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001410 arm_smmu_master_free_smes(fwspec);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301411
1412 arm_smmu_rpm_put(smmu);
1413
Antonios Motakis5fc63a72013-10-18 16:08:29 +01001414 iommu_group_remove_device(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001415 kfree(fwspec->iommu_priv);
1416 iommu_fwspec_free(dev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001417}
1418
Joerg Roedelaf659932015-10-21 23:51:41 +02001419static struct iommu_group *arm_smmu_device_group(struct device *dev)
1420{
Joerg Roedel9b468f72018-11-29 14:01:00 +01001421 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001422 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
Robin Murphy588888a2016-09-12 17:13:54 +01001423 struct iommu_group *group = NULL;
1424 int i, idx;
1425
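	/*
	 * Stream-mapping entries can be shared between masters (e.g. due to
	 * StreamID masking), so if any of this master's entries already
	 * belongs to a group, all of them must agree on that group.
	 */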
Robin Murphyadfec2e2016-09-12 17:13:55 +01001426 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy588888a2016-09-12 17:13:54 +01001427 if (group && smmu->s2crs[idx].group &&
1428 group != smmu->s2crs[idx].group)
1429 return ERR_PTR(-EINVAL);
1430
1431 group = smmu->s2crs[idx].group;
1432 }
1433
1434 if (group)
Robin Murphye1b44cb2016-11-11 17:59:22 +00001435 return iommu_group_ref_get(group);
Joerg Roedelaf659932015-10-21 23:51:41 +02001436
1437 if (dev_is_pci(dev))
1438 group = pci_device_group(dev);
Nipun Guptaeab03e22018-09-10 19:19:18 +05301439 else if (dev_is_fsl_mc(dev))
1440 group = fsl_mc_device_group(dev);
Joerg Roedelaf659932015-10-21 23:51:41 +02001441 else
1442 group = generic_device_group(dev);
1443
Joerg Roedelaf659932015-10-21 23:51:41 +02001444 return group;
1445}
1446
Will Deaconc752ce42014-06-25 22:46:31 +01001447static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
1448 enum iommu_attr attr, void *data)
1449{
Joerg Roedel1d672632015-03-26 13:43:10 +01001450 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001451
Robin Murphy44f68762018-09-20 17:10:27 +01001452	switch (domain->type) {
1453 case IOMMU_DOMAIN_UNMANAGED:
1454 switch (attr) {
1455 case DOMAIN_ATTR_NESTING:
1456 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
1457 return 0;
1458 default:
1459 return -ENODEV;
1460 }
1461 break;
1462 case IOMMU_DOMAIN_DMA:
1463 switch (attr) {
1464 case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
1465 *(int *)data = smmu_domain->non_strict;
1466 return 0;
1467 default:
1468 return -ENODEV;
1469 }
1470 break;
Will Deaconc752ce42014-06-25 22:46:31 +01001471 default:
Robin Murphy44f68762018-09-20 17:10:27 +01001472 return -EINVAL;
Will Deaconc752ce42014-06-25 22:46:31 +01001473 }
1474}
1475
1476static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
1477 enum iommu_attr attr, void *data)
1478{
Will Deacon518f7132014-11-14 17:17:54 +00001479 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01001480 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001481
Will Deacon518f7132014-11-14 17:17:54 +00001482 mutex_lock(&smmu_domain->init_mutex);
1483
Robin Murphy44f68762018-09-20 17:10:27 +01001484	switch (domain->type) {
1485 case IOMMU_DOMAIN_UNMANAGED:
1486 switch (attr) {
1487 case DOMAIN_ATTR_NESTING:
1488 if (smmu_domain->smmu) {
1489 ret = -EPERM;
1490 goto out_unlock;
1491 }
1492
1493 if (*(int *)data)
1494 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1495 else
1496 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1497 break;
1498 default:
1499 ret = -ENODEV;
Will Deacon518f7132014-11-14 17:17:54 +00001500 }
Robin Murphy44f68762018-09-20 17:10:27 +01001501 break;
1502 case IOMMU_DOMAIN_DMA:
1503 switch (attr) {
1504 case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
1505 smmu_domain->non_strict = *(int *)data;
1506 break;
1507 default:
1508 ret = -ENODEV;
1509 }
Will Deacon518f7132014-11-14 17:17:54 +00001510 break;
Will Deaconc752ce42014-06-25 22:46:31 +01001511 default:
Robin Murphy44f68762018-09-20 17:10:27 +01001512 ret = -EINVAL;
Will Deaconc752ce42014-06-25 22:46:31 +01001513 }
Will Deacon518f7132014-11-14 17:17:54 +00001514out_unlock:
1515 mutex_unlock(&smmu_domain->init_mutex);
1516 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01001517}
1518
Robin Murphy021bb842016-09-14 15:26:46 +01001519static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
1520{
Robin Murphy56fbf602017-03-31 12:03:33 +01001521 u32 mask, fwid = 0;
Robin Murphy021bb842016-09-14 15:26:46 +01001522
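	/*
	 * Pack the StreamID (args[0]) and, if present, the SMR mask (args[1]
	 * or the "stream-match-mask" property) into a single 32-bit fwspec ID
	 * using the SMR field layout; arm_smmu_add_device() unpacks it again.
	 */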
1523 if (args->args_count > 0)
Robin Murphy0caf5f42019-08-15 19:37:23 +01001524 fwid |= FIELD_PREP(SMR_ID, args->args[0]);
Robin Murphy021bb842016-09-14 15:26:46 +01001525
1526 if (args->args_count > 1)
Robin Murphy0caf5f42019-08-15 19:37:23 +01001527 fwid |= FIELD_PREP(SMR_MASK, args->args[1]);
Robin Murphy56fbf602017-03-31 12:03:33 +01001528 else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
Robin Murphy0caf5f42019-08-15 19:37:23 +01001529 fwid |= FIELD_PREP(SMR_MASK, mask);
Robin Murphy021bb842016-09-14 15:26:46 +01001530
1531 return iommu_fwspec_add_ids(dev, &fwid, 1);
1532}
1533
Eric Augerf3ebee82017-01-19 20:57:55 +00001534static void arm_smmu_get_resv_regions(struct device *dev,
1535 struct list_head *head)
1536{
1537 struct iommu_resv_region *region;
1538 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
1539
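	/*
	 * Reserve a fixed IOVA window for MSI doorbell mappings and report it
	 * as a software-managed MSI region (IOMMU_RESV_SW_MSI).
	 */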
1540 region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
Robin Murphy9d3a4de2017-03-16 17:00:16 +00001541 prot, IOMMU_RESV_SW_MSI);
Eric Augerf3ebee82017-01-19 20:57:55 +00001542 if (!region)
1543 return;
1544
1545 list_add_tail(&region->list, head);
Robin Murphy273df962017-03-16 17:00:19 +00001546
1547 iommu_dma_get_resv_regions(dev, head);
Eric Augerf3ebee82017-01-19 20:57:55 +00001548}
1549
1550static void arm_smmu_put_resv_regions(struct device *dev,
1551 struct list_head *head)
1552{
1553 struct iommu_resv_region *entry, *next;
1554
1555 list_for_each_entry_safe(entry, next, head, list)
1556 kfree(entry);
1557}
1558
Will Deacon518f7132014-11-14 17:17:54 +00001559static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01001560 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01001561 .domain_alloc = arm_smmu_domain_alloc,
1562 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01001563 .attach_dev = arm_smmu_attach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01001564 .map = arm_smmu_map,
1565 .unmap = arm_smmu_unmap,
Robin Murphy44f68762018-09-20 17:10:27 +01001566 .flush_iotlb_all = arm_smmu_flush_iotlb_all,
Robin Murphy32b12442017-09-28 15:55:01 +01001567 .iotlb_sync = arm_smmu_iotlb_sync,
Will Deaconc752ce42014-06-25 22:46:31 +01001568 .iova_to_phys = arm_smmu_iova_to_phys,
1569 .add_device = arm_smmu_add_device,
1570 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02001571 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01001572 .domain_get_attr = arm_smmu_domain_get_attr,
1573 .domain_set_attr = arm_smmu_domain_set_attr,
Robin Murphy021bb842016-09-14 15:26:46 +01001574 .of_xlate = arm_smmu_of_xlate,
Eric Augerf3ebee82017-01-19 20:57:55 +00001575 .get_resv_regions = arm_smmu_get_resv_regions,
1576 .put_resv_regions = arm_smmu_put_resv_regions,
Will Deacon518f7132014-11-14 17:17:54 +00001577 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001578};
1579
1580static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
1581{
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001582 int i;
Robin Murphy62b993a2019-08-15 19:37:36 +01001583 u32 reg;
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001584
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001585 /* clear global FSR */
Robin Murphy00320ce2019-08-15 19:37:31 +01001586 reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
1587 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, reg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001588
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001589 /*
1590 * Reset stream mapping groups: Initial values mark all SMRn as
1591 * invalid and all S2CRn as bypass unless overridden.
1592 */
Robin Murphy8e8b2032016-09-12 17:13:50 +01001593 for (i = 0; i < smmu->num_mapping_groups; ++i)
1594 arm_smmu_write_sme(smmu, i);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001595
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001596 /* Make sure all context banks are disabled and clear CB_FSR */
1597 for (i = 0; i < smmu->num_context_banks; ++i) {
Robin Murphy90df3732017-08-08 14:56:14 +01001598 arm_smmu_write_context_bank(smmu, i);
Robin Murphy19713fd2019-08-15 19:37:30 +01001599 arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_FSR, FSR_FAULT);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001600 }
Will Deacon1463fe42013-07-31 19:21:27 +01001601
Will Deacon45ae7cf2013-06-24 18:31:25 +01001602 /* Invalidate the TLB, just in case */
Robin Murphy00320ce2019-08-15 19:37:31 +01001603 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLH, QCOM_DUMMY_VAL);
1604 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLNSNH, QCOM_DUMMY_VAL);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001605
Robin Murphy00320ce2019-08-15 19:37:31 +01001606 reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001607
Will Deacon45ae7cf2013-06-24 18:31:25 +01001608 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001609 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001610
1611 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001612 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001613
Robin Murphy25a1c962016-02-10 14:25:33 +00001614 /* Enable client access, handling unmatched streams as appropriate */
1615 reg &= ~sCR0_CLIENTPD;
1616 if (disable_bypass)
1617 reg |= sCR0_USFCFG;
1618 else
1619 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001620
1621 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001622 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001623
1624 /* Don't upgrade barriers */
Robin Murphy0caf5f42019-08-15 19:37:23 +01001625 reg &= ~(sCR0_BSU);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001626
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001627 if (smmu->features & ARM_SMMU_FEAT_VMID16)
1628 reg |= sCR0_VMID16EN;
1629
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001630 if (smmu->features & ARM_SMMU_FEAT_EXIDS)
1631 reg |= sCR0_EXIDENABLE;
1632
Robin Murphy62b993a2019-08-15 19:37:36 +01001633 if (smmu->impl && smmu->impl->reset)
1634 smmu->impl->reset(smmu);
1635
Will Deacon45ae7cf2013-06-24 18:31:25 +01001636 /* Push the button */
Robin Murphy11febfc2017-03-30 17:56:31 +01001637 arm_smmu_tlb_sync_global(smmu);
Robin Murphy00320ce2019-08-15 19:37:31 +01001638 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, reg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001639}
1640
1641static int arm_smmu_id_size_to_bits(int size)
1642{
1643 switch (size) {
1644 case 0:
1645 return 32;
1646 case 1:
1647 return 36;
1648 case 2:
1649 return 40;
1650 case 3:
1651 return 42;
1652 case 4:
1653 return 44;
1654 case 5:
1655 default:
1656 return 48;
1657 }
1658}
1659
1660static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1661{
Robin Murphy490325e2019-08-15 19:37:26 +01001662 unsigned int size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001663 u32 id;
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001664 bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001665 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001666
1667 dev_notice(smmu->dev, "probing hardware configuration...\n");
Robin Murphyb7862e32016-04-13 18:13:03 +01001668 dev_notice(smmu->dev, "SMMUv%d with:\n",
1669 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001670
1671 /* ID0 */
Robin Murphy00320ce2019-08-15 19:37:31 +01001672 id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01001673
1674 /* Restrict available stages based on module parameter */
1675 if (force_stage == 1)
1676 id &= ~(ID0_S2TS | ID0_NTS);
1677 else if (force_stage == 2)
1678 id &= ~(ID0_S1TS | ID0_NTS);
1679
Will Deacon45ae7cf2013-06-24 18:31:25 +01001680 if (id & ID0_S1TS) {
1681 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
1682 dev_notice(smmu->dev, "\tstage 1 translation\n");
1683 }
1684
1685 if (id & ID0_S2TS) {
1686 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
1687 dev_notice(smmu->dev, "\tstage 2 translation\n");
1688 }
1689
1690 if (id & ID0_NTS) {
1691 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
1692 dev_notice(smmu->dev, "\tnested translation\n");
1693 }
1694
1695 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01001696 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001697 dev_err(smmu->dev, "\tno translation support!\n");
1698 return -ENODEV;
1699 }
1700
Robin Murphyb7862e32016-04-13 18:13:03 +01001701 if ((id & ID0_S1TS) &&
1702 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001703 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
1704 dev_notice(smmu->dev, "\taddress translation ops\n");
1705 }
1706
Robin Murphybae2c2d2015-07-29 19:46:05 +01001707 /*
1708 * In order for DMA API calls to work properly, we must defer to what
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001709 * the FW says about coherency, regardless of what the hardware claims.
Robin Murphybae2c2d2015-07-29 19:46:05 +01001710 * Fortunately, this also opens up a workaround for systems where the
1711 * ID register value has ended up configured incorrectly.
1712 */
Robin Murphybae2c2d2015-07-29 19:46:05 +01001713 cttw_reg = !!(id & ID0_CTTW);
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001714 if (cttw_fw || cttw_reg)
Robin Murphybae2c2d2015-07-29 19:46:05 +01001715 dev_notice(smmu->dev, "\t%scoherent table walk\n",
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001716 cttw_fw ? "" : "non-");
1717 if (cttw_fw != cttw_reg)
Robin Murphybae2c2d2015-07-29 19:46:05 +01001718 dev_notice(smmu->dev,
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001719 "\t(IDR0.CTTW overridden by FW configuration)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01001720
Robin Murphy21174242016-09-12 17:13:48 +01001721 /* Max. number of entries we have for stream matching/indexing */
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001722 if (smmu->version == ARM_SMMU_V2 && id & ID0_EXIDS) {
1723 smmu->features |= ARM_SMMU_FEAT_EXIDS;
1724 size = 1 << 16;
1725 } else {
Robin Murphy0caf5f42019-08-15 19:37:23 +01001726 size = 1 << FIELD_GET(ID0_NUMSIDB, id);
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001727 }
Robin Murphy21174242016-09-12 17:13:48 +01001728 smmu->streamid_mask = size - 1;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001729 if (id & ID0_SMS) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001730 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
Robin Murphy0caf5f42019-08-15 19:37:23 +01001731 size = FIELD_GET(ID0_NUMSMRG, id);
Robin Murphy21174242016-09-12 17:13:48 +01001732 if (size == 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001733 dev_err(smmu->dev,
1734 "stream-matching supported, but no SMRs present!\n");
1735 return -ENODEV;
1736 }
1737
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001738 /* Zero-initialised to mark as invalid */
1739 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
1740 GFP_KERNEL);
1741 if (!smmu->smrs)
1742 return -ENOMEM;
1743
Will Deacon45ae7cf2013-06-24 18:31:25 +01001744 dev_notice(smmu->dev,
Robin Murphy490325e2019-08-15 19:37:26 +01001745		   "\tstream matching with %u register groups\n", size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001746 }
Robin Murphy8e8b2032016-09-12 17:13:50 +01001747 /* s2cr->type == 0 means translation, so initialise explicitly */
1748 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
1749 GFP_KERNEL);
1750 if (!smmu->s2crs)
1751 return -ENOMEM;
1752 for (i = 0; i < size; i++)
1753 smmu->s2crs[i] = s2cr_init_val;
1754
Robin Murphy21174242016-09-12 17:13:48 +01001755 smmu->num_mapping_groups = size;
Robin Murphy588888a2016-09-12 17:13:54 +01001756 mutex_init(&smmu->stream_map_mutex);
Will Deacon8e517e72017-07-06 15:55:48 +01001757 spin_lock_init(&smmu->global_sync_lock);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001758
Robin Murphy7602b872016-04-28 17:12:09 +01001759 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
1760 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
1761 if (!(id & ID0_PTFS_NO_AARCH32S))
1762 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
1763 }
1764
Will Deacon45ae7cf2013-06-24 18:31:25 +01001765 /* ID1 */
Robin Murphy00320ce2019-08-15 19:37:31 +01001766 id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01001767 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001768
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01001769 /* Check for size mismatch of SMMU address space from mapped region */
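	/*
	 * (the register map is two equal halves of NUMPAGE pages each: the
	 * global register space followed by the context banks, hence the
	 * factor of two below)
	 */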
Robin Murphy0caf5f42019-08-15 19:37:23 +01001770 size = 1 << (FIELD_GET(ID1_NUMPAGENDXB, id) + 1);
Robin Murphy490325e2019-08-15 19:37:26 +01001771 if (smmu->numpage != 2 * size << smmu->pgshift)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001772 dev_warn(smmu->dev,
Robin Murphy490325e2019-08-15 19:37:26 +01001773 "SMMU address space size (0x%x) differs from mapped region size (0x%x)!\n",
1774 2 * size << smmu->pgshift, smmu->numpage);
1775 /* Now properly encode NUMPAGE to subsequently derive SMMU_CB_BASE */
1776 smmu->numpage = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001777
Robin Murphy0caf5f42019-08-15 19:37:23 +01001778 smmu->num_s2_context_banks = FIELD_GET(ID1_NUMS2CB, id);
1779 smmu->num_context_banks = FIELD_GET(ID1_NUMCB, id);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001780 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
1781 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
1782 return -ENODEV;
1783 }
1784 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
1785 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphy90df3732017-08-08 14:56:14 +01001786 smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks,
1787 sizeof(*smmu->cbs), GFP_KERNEL);
1788 if (!smmu->cbs)
1789 return -ENOMEM;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001790
1791 /* ID2 */
Robin Murphy00320ce2019-08-15 19:37:31 +01001792 id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID2);
Robin Murphy0caf5f42019-08-15 19:37:23 +01001793 size = arm_smmu_id_size_to_bits(FIELD_GET(ID2_IAS, id));
Will Deacon518f7132014-11-14 17:17:54 +00001794 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001795
Will Deacon518f7132014-11-14 17:17:54 +00001796 /* The output mask is also applied for bypass */
Robin Murphy0caf5f42019-08-15 19:37:23 +01001797 size = arm_smmu_id_size_to_bits(FIELD_GET(ID2_OAS, id));
Will Deacon518f7132014-11-14 17:17:54 +00001798 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001799
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001800 if (id & ID2_VMID16)
1801 smmu->features |= ARM_SMMU_FEAT_VMID16;
1802
Robin Murphyf1d84542015-03-04 16:41:05 +00001803 /*
1804 * What the page table walker can address actually depends on which
1805 * descriptor format is in use, but since a) we don't know that yet,
1806 * and b) it can vary per context bank, this will have to do...
1807 */
1808 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
1809 dev_warn(smmu->dev,
1810 "failed to set DMA mask for table walker\n");
1811
Robin Murphyb7862e32016-04-13 18:13:03 +01001812 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00001813 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01001814 if (smmu->version == ARM_SMMU_V1_64K)
1815 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001816 } else {
Robin Murphy0caf5f42019-08-15 19:37:23 +01001817 size = FIELD_GET(ID2_UBS, id);
Will Deacon518f7132014-11-14 17:17:54 +00001818 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00001819 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01001820 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00001821 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01001822 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00001823 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01001824 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001825 }
1826
Robin Murphy7602b872016-04-28 17:12:09 +01001827 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01001828 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01001829 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01001830 if (smmu->features &
1831 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01001832 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01001833 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01001834 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01001835 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01001836 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01001837
Robin Murphyd5466352016-05-09 17:20:09 +01001838 if (arm_smmu_ops.pgsize_bitmap == -1UL)
1839 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
1840 else
1841 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
1842 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
1843 smmu->pgsize_bitmap);
1844
Will Deacon518f7132014-11-14 17:17:54 +00001845
Will Deacon28d60072014-09-01 16:24:48 +01001846 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
1847 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
Will Deacon518f7132014-11-14 17:17:54 +00001848 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01001849
1850 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
1851 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
Will Deacon518f7132014-11-14 17:17:54 +00001852 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01001853
Robin Murphy3995e182019-08-15 19:37:35 +01001854 if (smmu->impl && smmu->impl->cfg_probe)
1855 return smmu->impl->cfg_probe(smmu);
1856
Will Deacon45ae7cf2013-06-24 18:31:25 +01001857 return 0;
1858}
1859
Robin Murphy67b65a32016-04-13 18:12:57 +01001860struct arm_smmu_match_data {
1861 enum arm_smmu_arch_version version;
1862 enum arm_smmu_implementation model;
1863};
1864
1865#define ARM_SMMU_MATCH_DATA(name, ver, imp) \
Sricharan R96a299d2018-12-04 11:52:09 +05301866static const struct arm_smmu_match_data name = { .version = ver, .model = imp }
Robin Murphy67b65a32016-04-13 18:12:57 +01001867
1868ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
1869ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
Robin Murphyb7862e32016-04-13 18:13:03 +01001870ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01001871ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
Robin Murphye086d912016-04-13 18:12:58 +01001872ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
Vivek Gautam89cddc52018-12-04 11:52:13 +05301873ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2);
Robin Murphy67b65a32016-04-13 18:12:57 +01001874
Joerg Roedel09b52692014-10-02 12:24:45 +02001875static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01001876 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
1877 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
1878 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01001879 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01001880 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01001881 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Vivek Gautam89cddc52018-12-04 11:52:13 +05301882 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
Robin Murphy09360402014-08-28 17:51:59 +01001883 { },
1884};
Robin Murphy09360402014-08-28 17:51:59 +01001885
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00001886#ifdef CONFIG_ACPI
1887static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
1888{
1889 int ret = 0;
1890
1891 switch (model) {
1892 case ACPI_IORT_SMMU_V1:
1893 case ACPI_IORT_SMMU_CORELINK_MMU400:
1894 smmu->version = ARM_SMMU_V1;
1895 smmu->model = GENERIC_SMMU;
1896 break;
Robin Murphy84c24372017-06-19 16:41:56 +01001897 case ACPI_IORT_SMMU_CORELINK_MMU401:
1898 smmu->version = ARM_SMMU_V1_64K;
1899 smmu->model = GENERIC_SMMU;
1900 break;
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00001901 case ACPI_IORT_SMMU_V2:
1902 smmu->version = ARM_SMMU_V2;
1903 smmu->model = GENERIC_SMMU;
1904 break;
1905 case ACPI_IORT_SMMU_CORELINK_MMU500:
1906 smmu->version = ARM_SMMU_V2;
1907 smmu->model = ARM_MMU500;
1908 break;
Robin Murphy84c24372017-06-19 16:41:56 +01001909 case ACPI_IORT_SMMU_CAVIUM_THUNDERX:
1910 smmu->version = ARM_SMMU_V2;
1911 smmu->model = CAVIUM_SMMUV2;
1912 break;
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00001913 default:
1914 ret = -ENODEV;
1915 }
1916
1917 return ret;
1918}
1919
1920static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
1921 struct arm_smmu_device *smmu)
1922{
1923 struct device *dev = smmu->dev;
1924 struct acpi_iort_node *node =
1925 *(struct acpi_iort_node **)dev_get_platdata(dev);
1926 struct acpi_iort_smmu *iort_smmu;
1927 int ret;
1928
1929 /* Retrieve SMMU1/2 specific data */
1930 iort_smmu = (struct acpi_iort_smmu *)node->node_data;
1931
1932 ret = acpi_smmu_get_data(iort_smmu->model, smmu);
1933 if (ret < 0)
1934 return ret;
1935
1936 /* Ignore the configuration access interrupt */
1937 smmu->num_global_irqs = 1;
1938
1939 if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
1940 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
1941
1942 return 0;
1943}
1944#else
1945static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
1946 struct arm_smmu_device *smmu)
1947{
1948 return -ENODEV;
1949}
1950#endif
1951
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001952static int arm_smmu_device_dt_probe(struct platform_device *pdev,
1953 struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001954{
Robin Murphy67b65a32016-04-13 18:12:57 +01001955 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001956 struct device *dev = &pdev->dev;
Robin Murphy021bb842016-09-14 15:26:46 +01001957 bool legacy_binding;
1958
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001959 if (of_property_read_u32(dev->of_node, "#global-interrupts",
1960 &smmu->num_global_irqs)) {
1961 dev_err(dev, "missing #global-interrupts property\n");
1962 return -ENODEV;
1963 }
1964
1965 data = of_device_get_match_data(dev);
1966 smmu->version = data->version;
1967 smmu->model = data->model;
1968
Robin Murphy021bb842016-09-14 15:26:46 +01001969 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
1970 if (legacy_binding && !using_generic_binding) {
1971 if (!using_legacy_binding)
1972 pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
1973 using_legacy_binding = true;
1974 } else if (!legacy_binding && !using_legacy_binding) {
1975 using_generic_binding = true;
1976 } else {
1977 dev_err(dev, "not probing due to mismatched DT properties\n");
1978 return -ENODEV;
1979 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001980
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001981 if (of_dma_is_coherent(dev->of_node))
1982 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
1983
1984 return 0;
1985}
1986
Robin Murphyf6810c12017-04-10 16:51:05 +05301987static void arm_smmu_bus_init(void)
1988{
1989 /* Oh, for a proper bus abstraction */
1990 if (!iommu_present(&platform_bus_type))
1991 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
1992#ifdef CONFIG_ARM_AMBA
1993 if (!iommu_present(&amba_bustype))
1994 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
1995#endif
1996#ifdef CONFIG_PCI
1997 if (!iommu_present(&pci_bus_type)) {
1998 pci_request_acs();
1999 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
2000 }
2001#endif
Nipun Guptaeab03e22018-09-10 19:19:18 +05302002#ifdef CONFIG_FSL_MC_BUS
2003 if (!iommu_present(&fsl_mc_bus_type))
2004 bus_set_iommu(&fsl_mc_bus_type, &arm_smmu_ops);
2005#endif
Robin Murphyf6810c12017-04-10 16:51:05 +05302006}
2007
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002008static int arm_smmu_device_probe(struct platform_device *pdev)
2009{
2010 struct resource *res;
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002011 resource_size_t ioaddr;
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002012 struct arm_smmu_device *smmu;
2013 struct device *dev = &pdev->dev;
2014 int num_irqs, i, err;
2015
Will Deacon45ae7cf2013-06-24 18:31:25 +01002016 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
2017 if (!smmu) {
2018 dev_err(dev, "failed to allocate arm_smmu_device\n");
2019 return -ENOMEM;
2020 }
2021 smmu->dev = dev;
2022
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002023 if (dev->of_node)
2024 err = arm_smmu_device_dt_probe(pdev, smmu);
2025 else
2026 err = arm_smmu_device_acpi_probe(pdev, smmu);
2027
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002028 if (err)
2029 return err;
Robin Murphy09360402014-08-28 17:51:59 +01002030
Robin Murphyfc058d32019-08-15 19:37:33 +01002031 smmu = arm_smmu_impl_init(smmu);
2032 if (IS_ERR(smmu))
2033 return PTR_ERR(smmu);
2034
Will Deacon45ae7cf2013-06-24 18:31:25 +01002035 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002036 ioaddr = res->start;
Julia Lawall8a7f4312013-08-19 12:20:37 +01002037 smmu->base = devm_ioremap_resource(dev, res);
2038 if (IS_ERR(smmu->base))
2039 return PTR_ERR(smmu->base);
Robin Murphy490325e2019-08-15 19:37:26 +01002040 /*
2041 * The resource size should effectively match the value of SMMU_TOP;
2042 * stash that temporarily until we know PAGESIZE to validate it with.
2043 */
2044 smmu->numpage = resource_size(res);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002045
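	/*
	 * Per the binding, the first #global-interrupts entries are global
	 * fault/config interrupts; everything after them is a context bank
	 * interrupt.
	 */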
Will Deacon45ae7cf2013-06-24 18:31:25 +01002046 num_irqs = 0;
2047 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
2048 num_irqs++;
2049 if (num_irqs > smmu->num_global_irqs)
2050 smmu->num_context_irqs++;
2051 }
2052
Andreas Herrmann44a08de2013-10-01 13:39:07 +01002053 if (!smmu->num_context_irqs) {
2054 dev_err(dev, "found %d interrupts but expected at least %d\n",
2055 num_irqs, smmu->num_global_irqs + 1);
2056 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002057 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002058
Kees Cooka86854d2018-06-12 14:07:58 -07002059 smmu->irqs = devm_kcalloc(dev, num_irqs, sizeof(*smmu->irqs),
Will Deacon45ae7cf2013-06-24 18:31:25 +01002060 GFP_KERNEL);
2061 if (!smmu->irqs) {
2062 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
2063 return -ENOMEM;
2064 }
2065
2066 for (i = 0; i < num_irqs; ++i) {
2067 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07002068
Will Deacon45ae7cf2013-06-24 18:31:25 +01002069 if (irq < 0) {
2070 dev_err(dev, "failed to get irq index %d\n", i);
2071 return -ENODEV;
2072 }
2073 smmu->irqs[i] = irq;
2074 }
2075
Sricharan R96a299d2018-12-04 11:52:09 +05302076 err = devm_clk_bulk_get_all(dev, &smmu->clks);
2077 if (err < 0) {
2078 dev_err(dev, "failed to get clocks %d\n", err);
2079 return err;
2080 }
2081 smmu->num_clks = err;
2082
2083 err = clk_bulk_prepare_enable(smmu->num_clks, smmu->clks);
2084 if (err)
2085 return err;
2086
Olav Haugan3c8766d2014-08-22 17:12:32 -07002087 err = arm_smmu_device_cfg_probe(smmu);
2088 if (err)
2089 return err;
2090
Vivek Gautamd1e20222018-07-19 23:23:56 +05302091 if (smmu->version == ARM_SMMU_V2) {
2092 if (smmu->num_context_banks > smmu->num_context_irqs) {
2093 dev_err(dev,
2094 "found only %d context irq(s) but %d required\n",
2095 smmu->num_context_irqs, smmu->num_context_banks);
2096 return -ENODEV;
2097 }
2098
2099 /* Ignore superfluous interrupts */
2100 smmu->num_context_irqs = smmu->num_context_banks;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002101 }
2102
Will Deacon45ae7cf2013-06-24 18:31:25 +01002103 for (i = 0; i < smmu->num_global_irqs; ++i) {
Peng Fanbee14002016-07-04 17:38:22 +08002104 err = devm_request_irq(smmu->dev, smmu->irqs[i],
2105 arm_smmu_global_fault,
2106 IRQF_SHARED,
2107 "arm-smmu global fault",
2108 smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002109 if (err) {
2110 dev_err(dev, "failed to request global IRQ %d (%u)\n",
2111 i, smmu->irqs[i]);
Robin Murphyf80cd882016-09-14 15:21:39 +01002112 return err;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002113 }
2114 }
2115
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002116 err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
2117 "smmu.%pa", &ioaddr);
2118 if (err) {
2119 dev_err(dev, "Failed to register iommu in sysfs\n");
2120 return err;
2121 }
2122
2123 iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
2124 iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
2125
2126 err = iommu_device_register(&smmu->iommu);
2127 if (err) {
2128 dev_err(dev, "Failed to register iommu\n");
2129 return err;
2130 }
2131
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002132 platform_set_drvdata(pdev, smmu);
Will Deaconfd90cec2013-08-21 13:56:34 +01002133 arm_smmu_device_reset(smmu);
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03002134 arm_smmu_test_smr_masks(smmu);
Robin Murphy021bb842016-09-14 15:26:46 +01002135
Robin Murphyf6810c12017-04-10 16:51:05 +05302136 /*
Sricharan Rd4a44f02018-12-04 11:52:10 +05302137 * We want to avoid touching dev->power.lock in fastpaths unless
2138 * it's really going to do something useful - pm_runtime_enabled()
2139 * can serve as an ideal proxy for that decision. So, conditionally
2140 * enable pm_runtime.
2141 */
2142 if (dev->pm_domain) {
2143 pm_runtime_set_active(dev);
2144 pm_runtime_enable(dev);
2145 }
2146
2147 /*
Robin Murphyf6810c12017-04-10 16:51:05 +05302148 * For ACPI and generic DT bindings, an SMMU will be probed before
2149 * any device which might need it, so we want the bus ops in place
2150 * ready to handle default domain setup as soon as any SMMU exists.
2151 */
2152 if (!using_legacy_binding)
2153 arm_smmu_bus_init();
2154
Will Deacon45ae7cf2013-06-24 18:31:25 +01002155 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002156}
2157
Robin Murphyf6810c12017-04-10 16:51:05 +05302158/*
2159 * With the legacy DT binding in play, though, we have no guarantees about
2160 * probe order, but then we're also not doing default domains, so we can
2161 * delay setting bus ops until we're sure every possible SMMU is ready,
2162 * and that way ensure that no add_device() calls get missed.
2163 */
2164static int arm_smmu_legacy_bus_init(void)
2165{
2166 if (using_legacy_binding)
2167 arm_smmu_bus_init();
2168 return 0;
2169}
2170device_initcall_sync(arm_smmu_legacy_bus_init);
2171
Paul Gortmakeraddb672f2018-12-01 14:19:16 -05002172static void arm_smmu_device_shutdown(struct platform_device *pdev)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002173{
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002174 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002175
2176 if (!smmu)
Paul Gortmakeraddb672f2018-12-01 14:19:16 -05002177 return;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002178
Will Deaconecfadb62013-07-31 19:21:28 +01002179 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002180 dev_err(&pdev->dev, "removing device with active domains!\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01002181
Sricharan Rd4a44f02018-12-04 11:52:10 +05302182 arm_smmu_rpm_get(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002183 /* Turn the thing off */
Robin Murphy00320ce2019-08-15 19:37:31 +01002184 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, sCR0_CLIENTPD);
Sricharan Rd4a44f02018-12-04 11:52:10 +05302185 arm_smmu_rpm_put(smmu);
Sricharan R96a299d2018-12-04 11:52:09 +05302186
Sricharan Rd4a44f02018-12-04 11:52:10 +05302187 if (pm_runtime_enabled(smmu->dev))
2188 pm_runtime_force_suspend(smmu->dev);
2189 else
2190 clk_bulk_disable(smmu->num_clks, smmu->clks);
2191
2192 clk_bulk_unprepare(smmu->num_clks, smmu->clks);
Nate Watterson7aa86192017-06-29 18:18:15 -04002193}
2194
Sricharan R96a299d2018-12-04 11:52:09 +05302195static int __maybe_unused arm_smmu_runtime_resume(struct device *dev)
Robin Murphya2d866f2017-08-08 14:56:15 +01002196{
2197 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
Sricharan R96a299d2018-12-04 11:52:09 +05302198 int ret;
2199
2200 ret = clk_bulk_enable(smmu->num_clks, smmu->clks);
2201 if (ret)
2202 return ret;
Robin Murphya2d866f2017-08-08 14:56:15 +01002203
2204 arm_smmu_device_reset(smmu);
Sricharan R96a299d2018-12-04 11:52:09 +05302205
Will Deacon45ae7cf2013-06-24 18:31:25 +01002206 return 0;
2207}
2208
Sricharan R96a299d2018-12-04 11:52:09 +05302209static int __maybe_unused arm_smmu_runtime_suspend(struct device *dev)
Dan Carpenter6614ee72013-08-21 09:34:20 +01002210{
Sricharan R96a299d2018-12-04 11:52:09 +05302211 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
2212
2213 clk_bulk_disable(smmu->num_clks, smmu->clks);
2214
2215 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002216}
2217
Robin Murphya2d866f2017-08-08 14:56:15 +01002218static int __maybe_unused arm_smmu_pm_resume(struct device *dev)
2219{
Sricharan R96a299d2018-12-04 11:52:09 +05302220 if (pm_runtime_suspended(dev))
2221 return 0;
Robin Murphya2d866f2017-08-08 14:56:15 +01002222
Sricharan R96a299d2018-12-04 11:52:09 +05302223 return arm_smmu_runtime_resume(dev);
Robin Murphya2d866f2017-08-08 14:56:15 +01002224}
2225
Sricharan R96a299d2018-12-04 11:52:09 +05302226static int __maybe_unused arm_smmu_pm_suspend(struct device *dev)
2227{
2228 if (pm_runtime_suspended(dev))
2229 return 0;
2230
2231 return arm_smmu_runtime_suspend(dev);
2232}
2233
2234static const struct dev_pm_ops arm_smmu_pm_ops = {
2235 SET_SYSTEM_SLEEP_PM_OPS(arm_smmu_pm_suspend, arm_smmu_pm_resume)
2236 SET_RUNTIME_PM_OPS(arm_smmu_runtime_suspend,
2237 arm_smmu_runtime_resume, NULL)
2238};
Robin Murphya2d866f2017-08-08 14:56:15 +01002239
Will Deacon45ae7cf2013-06-24 18:31:25 +01002240static struct platform_driver arm_smmu_driver = {
2241 .driver = {
Paul Gortmakeraddb672f2018-12-01 14:19:16 -05002242 .name = "arm-smmu",
2243 .of_match_table = of_match_ptr(arm_smmu_of_match),
2244 .pm = &arm_smmu_pm_ops,
2245 .suppress_bind_attrs = true,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002246 },
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002247 .probe = arm_smmu_device_probe,
Nate Watterson7aa86192017-06-29 18:18:15 -04002248 .shutdown = arm_smmu_device_shutdown,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002249};
Paul Gortmakeraddb672f2018-12-01 14:19:16 -05002250builtin_platform_driver(arm_smmu_driver);