// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 *	- Extended Stream ID (16 bit)
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include <linux/amba/bus.h>
#include <linux/fsl/mc.h>

#include "arm-smmu.h"

/*
 * Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU
 * global register space are still, in fact, using a hypervisor to mediate it
 * by trapping and emulating register accesses. Sadly, some deployed versions
 * of said trapping code have bugs wherein they go horribly wrong for stores
 * using r31 (i.e. XZR/WZR) as the source register.
 */
#define QCOM_DUMMY_VAL -1

#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */
#define TLB_SPIN_COUNT			10

#define MSI_IOVA_BASE			0x8000000
#define MSI_IOVA_LENGTH			0x100000

static int force_stage;
/*
 * not really modular, but the easiest way to keep compat with existing
 * bootargs behaviour is to continue using module_param() here.
 */
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass =
	IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT);
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

struct arm_smmu_s2cr {
	struct iommu_group		*group;
	int				count;
	enum arm_smmu_s2cr_type		type;
	enum arm_smmu_s2cr_privcfg	privcfg;
	u8				cbndx;
};

#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
}

struct arm_smmu_smr {
	u16				mask;
	u16				id;
	bool				valid;
};

struct arm_smmu_cb {
	u64				ttbr[2];
	u32				tcr[2];
	u32				mair[2];
	struct arm_smmu_cfg		*cfg;
};

struct arm_smmu_master_cfg {
	struct arm_smmu_device		*smmu;
	s16				smendx[];
};
#define INVALID_SMENDX			-1
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
#define fwspec_smendx(fw, i) \
	(i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
#define for_each_cfg_sme(fw, i, idx) \
	for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)

static bool using_legacy_binding, using_generic_binding;

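/*
 * Runtime PM helpers: only call into the PM core when runtime PM is
 * enabled for this SMMU's device, so platforms without power management
 * see no behavioural change.
 */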
static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
{
	if (pm_runtime_enabled(smmu->dev))
		return pm_runtime_get_sync(smmu->dev);

	return 0;
}

static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
{
	if (pm_runtime_enabled(smmu->dev))
		pm_runtime_put(smmu->dev);
}

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

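/* Callback for pci_for_each_dma_alias(): record the RID as a Stream ID */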
static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}

static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", -1)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}

static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;

static int arm_smmu_register_legacy_master(struct device *dev,
					    struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err;

	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}

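/* Atomically claim a free index in an allocation bitmap (context banks etc.) */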
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
				int sync, int status)
{
	unsigned int spin_cnt, delay;
	u32 reg;

	arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL);
	for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
		for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
			reg = arm_smmu_readl(smmu, page, status);
			if (!(reg & sTLBGSTATUS_GSACTIVE))
				return;
			cpu_relax();
		}
		udelay(delay);
	}
	dev_err_ratelimited(smmu->dev,
			    "TLB sync timed out -- SMMU may be deadlocked\n");
}

static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
{
	unsigned long flags;

	spin_lock_irqsave(&smmu->global_sync_lock, flags);
	__arm_smmu_tlb_sync(smmu, ARM_SMMU_GR0, ARM_SMMU_GR0_sTLBGSYNC,
			    ARM_SMMU_GR0_sTLBGSTATUS);
	spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
}

static void arm_smmu_tlb_sync_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	unsigned long flags;

	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
	__arm_smmu_tlb_sync(smmu, ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx),
			    ARM_SMMU_CB_TLBSYNC, ARM_SMMU_CB_TLBSTATUS);
	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
}

static void arm_smmu_tlb_sync_vmid(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;

	arm_smmu_tlb_sync_global(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context_s1(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	/*
	 * The TLBI write may be relaxed, so ensure that PTEs cleared by the
	 * current CPU are visible beforehand.
	 */
	wmb();
	arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx,
			  ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid);
	arm_smmu_tlb_sync_context(cookie);
}

static void arm_smmu_tlb_inv_context_s2(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	/* See above */
	wmb();
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
	arm_smmu_tlb_sync_global(smmu);
}

static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size,
				      size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	int reg, idx = cfg->cbndx;

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	reg = leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

	if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
		iova = (iova >> 12) << 12;
		iova |= cfg->asid;
		do {
			arm_smmu_cb_write(smmu, idx, reg, iova);
			iova += granule;
		} while (size -= granule);
	} else {
		iova >>= 12;
		iova |= (u64)cfg->asid << 48;
		do {
			arm_smmu_cb_writeq(smmu, idx, reg, iova);
			iova += granule >> 12;
		} while (size -= granule);
	}
}

static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
				      size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	int reg, idx = smmu_domain->cfg.cbndx;

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	reg = leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L : ARM_SMMU_CB_S2_TLBIIPAS2;
	iova >>= 12;
	do {
		if (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64)
			arm_smmu_cb_writeq(smmu, idx, reg, iova);
		else
			arm_smmu_cb_write(smmu, idx, reg, iova);
		iova += granule >> 12;
	} while (size -= granule);
}

/*
 * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
 * almost negligible, but the benefit of getting the first one in as far ahead
 * of the sync as possible is significant, hence we don't just make this a
 * no-op and set .tlb_sync to arm_smmu_tlb_inv_context_s2() as you might think.
 */
static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
					 size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
}

static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
				  size_t granule, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;

	ops->tlb_inv_range(iova, size, granule, false, cookie);
	ops->tlb_sync(cookie);
}

static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size,
				  size_t granule, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;

	ops->tlb_inv_range(iova, size, granule, true, cookie);
	ops->tlb_sync(cookie);
}

static void arm_smmu_tlb_add_page(struct iommu_iotlb_gather *gather,
				  unsigned long iova, size_t granule,
				  void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;

	ops->tlb_inv_range(iova, granule, granule, true, cookie);
}

static const struct arm_smmu_flush_ops arm_smmu_s1_tlb_ops = {
	.tlb = {
		.tlb_flush_all	= arm_smmu_tlb_inv_context_s1,
		.tlb_flush_walk	= arm_smmu_tlb_inv_walk,
		.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf,
		.tlb_add_page	= arm_smmu_tlb_add_page,
	},
	.tlb_inv_range		= arm_smmu_tlb_inv_range_s1,
	.tlb_sync		= arm_smmu_tlb_sync_context,
};

static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
	.tlb = {
		.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
		.tlb_flush_walk	= arm_smmu_tlb_inv_walk,
		.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf,
		.tlb_add_page	= arm_smmu_tlb_add_page,
	},
	.tlb_inv_range		= arm_smmu_tlb_inv_range_s2,
	.tlb_sync		= arm_smmu_tlb_sync_context,
};

static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
	.tlb = {
		.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
		.tlb_flush_walk	= arm_smmu_tlb_inv_walk,
		.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf,
		.tlb_add_page	= arm_smmu_tlb_add_page,
	},
	.tlb_inv_range		= arm_smmu_tlb_inv_vmid_nosync,
	.tlb_sync		= arm_smmu_tlb_sync_vmid,
};

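/* Per-context fault handler: report the fault details, then clear FSR */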
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	u32 fsr, fsynr, cbfrsynra;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	int idx = smmu_domain->cfg.cbndx;

	fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0);
	iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);
	cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));

	dev_err_ratelimited(smmu->dev,
	"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
			    fsr, iova, fsynr, cbfrsynra, idx);

	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;

	gfsr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
	gfsynr0 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, gfsr);
	return IRQ_HANDLED;
}

static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	cb->cfg = cfg;

	/* TCR */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr;
		} else {
			cb->tcr[0] = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
			cb->tcr[1] = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			cb->tcr[1] |= FIELD_PREP(TCR2_SEP, TCR2_SEP_UPSTREAM);
			if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
				cb->tcr[1] |= TCR2_AS;
		}
	} else {
		cb->tcr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	}

	/* TTBRs */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
			cb->ttbr[1] = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
		} else {
			cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
			cb->ttbr[0] |= FIELD_PREP(TTBRn_ASID, cfg->asid);
			cb->ttbr[1] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
			cb->ttbr[1] |= FIELD_PREP(TTBRn_ASID, cfg->asid);
		}
	} else {
		cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr;
			cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr;
		} else {
			cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
			cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		}
	}
}

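/* Commit (or disable) a context bank's software state to the hardware registers */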
static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
{
	u32 reg;
	bool stage1;
	struct arm_smmu_cb *cb = &smmu->cbs[idx];
	struct arm_smmu_cfg *cfg = cb->cfg;

	/* Unassigned context banks only need disabling */
	if (!cfg) {
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, 0);
		return;
	}

	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	/* CBA2R */
	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_VA64;
		else
			reg = 0;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= FIELD_PREP(CBA2R_VMID16, cfg->vmid);

		arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBA2R(idx), reg);
	}

	/* CBAR */
	reg = FIELD_PREP(CBAR_TYPE, cfg->cbar);
	if (smmu->version < ARM_SMMU_V2)
		reg |= FIELD_PREP(CBAR_IRPTNDX, cfg->irptndx);

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= FIELD_PREP(CBAR_S1_BPSHCFG, CBAR_S1_BPSHCFG_NSH) |
			FIELD_PREP(CBAR_S1_MEMATTR, CBAR_S1_MEMATTR_WB);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= FIELD_PREP(CBAR_VMID, cfg->vmid);
	}
	arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(idx), reg);

	/*
	 * TCR
	 * We must write this before the TTBRs, since it determines the
	 * access behaviour of some fields (in particular, ASID[15:8]).
	 */
	if (stage1 && smmu->version > ARM_SMMU_V1)
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR2, cb->tcr[1]);
	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR, cb->tcr[0]);

	/* TTBRs */
	if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_CONTEXTIDR, cfg->asid);
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR1, cb->ttbr[1]);
	} else {
		arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
		if (stage1)
			arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR1,
					   cb->ttbr[1]);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR0, cb->mair[0]);
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR1, cb->mair[1]);
	}

	/* SCTLR */
	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= SCTLR_E;

	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
}

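/*
 * Finalise a domain on the given SMMU: choose the translation stage and
 * page table format, claim a context bank, initialise it, and request the
 * context fault interrupt.
 */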
static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
		smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N              S1
	 *     S1             S1+S2            S1
	 *     S1               S2             S2
	 *     S1               S1             S1
	 *     N                N              N
	 *     N              S1+S2            S2
	 *     N                S2             S2
	 *     N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
	    !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
	    (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		} else {
			fmt = ARM_V7S;
			ias = min(ias, 32UL);
			oas = min(oas, 32UL);
		}
		smmu_domain->flush_ops = &arm_smmu_s1_tlb_ops;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		if (smmu->version == ARM_SMMU_V2)
			smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v2;
		else
			smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v1;
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}
	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (ret < 0)
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
		cfg->vmid = cfg->cbndx + 1;
	else
		cfg->asid = cfg->cbndx;

	smmu_domain->smmu = smmu;
	if (smmu->impl && smmu->impl->init_context) {
		ret = smmu->impl->init_context(smmu_domain);
		if (ret)
			goto out_unlock;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.coherent_walk	= smmu->features & ARM_SMMU_FEAT_COHERENT_WALK,
		.tlb		= &smmu_domain->flush_ops->tlb,
		.iommu_dev	= smmu->dev,
	};

	if (smmu_domain->non_strict)
		pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;

	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1UL << ias) - 1;
	domain->geometry.force_aperture = true;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
			       IRQF_SHARED, "arm-smmu-context-fault", domain);
	if (ret < 0) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	int ret, irq;

	if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
		return;

	ret = arm_smmu_rpm_get(smmu);
	if (ret < 0)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	smmu->cbs[cfg->cbndx].cfg = NULL;
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);

	arm_smmu_rpm_put(smmu);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED &&
	    type != IOMMU_DOMAIN_DMA &&
	    type != IOMMU_DOMAIN_IDENTITY)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
	    iommu_get_dma_cookie(&smmu_domain->domain))) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->cb_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

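/* Program a Stream Match Register from its software shadow */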
static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_smr *smr = smmu->smrs + idx;
	u32 reg = FIELD_PREP(SMR_ID, smr->id) | FIELD_PREP(SMR_MASK, smr->mask);

	if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
		reg |= SMR_VALID;
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(idx), reg);
}

static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
	u32 reg = FIELD_PREP(S2CR_TYPE, s2cr->type) |
		  FIELD_PREP(S2CR_CBNDX, s2cr->cbndx) |
		  FIELD_PREP(S2CR_PRIVCFG, s2cr->privcfg);

	if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
	    smmu->smrs[idx].valid)
		reg |= S2CR_EXIDVALID;
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
}

static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
{
	arm_smmu_write_s2cr(smmu, idx);
	if (smmu->smrs)
		arm_smmu_write_smr(smmu, idx);
}

/*
 * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
 * should be called after sCR0 is written.
 */
static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
{
	u32 smr;

	if (!smmu->smrs)
		return;

	/*
	 * SMR.ID bits may not be preserved if the corresponding MASK
	 * bits are set, so check each one separately. We can reject
	 * masters later if they try to claim IDs outside these masks.
	 */
	smr = FIELD_PREP(SMR_ID, smmu->streamid_mask);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(0), smr);
	smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(0));
	smmu->streamid_mask = FIELD_GET(SMR_ID, smr);

	smr = FIELD_PREP(SMR_MASK, smmu->streamid_mask);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(0), smr);
	smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(0));
	smmu->smr_mask_mask = FIELD_GET(SMR_MASK, smr);
}

static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
{
	struct arm_smmu_smr *smrs = smmu->smrs;
	int i, free_idx = -ENOSPC;

	/* Stream indexing is blissfully easy */
	if (!smrs)
		return id;

	/* Validating SMRs is... less so */
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		if (!smrs[i].valid) {
			/*
			 * Note the first free entry we come across, which
			 * we'll claim in the end if nothing else matches.
			 */
			if (free_idx < 0)
				free_idx = i;
			continue;
		}
		/*
		 * If the new entry is _entirely_ matched by an existing entry,
		 * then reuse that, with the guarantee that there also cannot
		 * be any subsequent conflicting entries. In normal use we'd
		 * expect simply identical entries for this case, but there's
		 * no harm in accommodating the generalisation.
		 */
		if ((mask & smrs[i].mask) == mask &&
		    !((id ^ smrs[i].id) & ~smrs[i].mask))
			return i;
		/*
		 * If the new entry has any other overlap with an existing one,
		 * though, then there always exists at least one stream ID
		 * which would cause a conflict, and we can't allow that risk.
		 */
		if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
			return -EINVAL;
	}

	return free_idx;
}

static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
{
	if (--smmu->s2crs[idx].count)
		return false;

	smmu->s2crs[idx] = s2cr_init_val;
	if (smmu->smrs)
		smmu->smrs[idx].valid = false;

	return true;
}

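/*
 * Reserve stream mapping table entries (SMRs/S2CRs) for each of the
 * master's Stream IDs, and only write the hardware once the whole set
 * has been validated.
 */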
static int arm_smmu_master_alloc_smes(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
	struct arm_smmu_device *smmu = cfg->smmu;
	struct arm_smmu_smr *smrs = smmu->smrs;
	struct iommu_group *group;
	int i, idx, ret;

	mutex_lock(&smmu->stream_map_mutex);
	/* Figure out a viable stream map entry allocation */
	for_each_cfg_sme(fwspec, i, idx) {
		u16 sid = FIELD_GET(SMR_ID, fwspec->ids[i]);
		u16 mask = FIELD_GET(SMR_MASK, fwspec->ids[i]);

		if (idx != INVALID_SMENDX) {
			ret = -EEXIST;
			goto out_err;
		}

		ret = arm_smmu_find_sme(smmu, sid, mask);
		if (ret < 0)
			goto out_err;

		idx = ret;
		if (smrs && smmu->s2crs[idx].count == 0) {
			smrs[idx].id = sid;
			smrs[idx].mask = mask;
			smrs[idx].valid = true;
		}
		smmu->s2crs[idx].count++;
		cfg->smendx[i] = (s16)idx;
	}

	group = iommu_group_get_for_dev(dev);
	if (!group)
		group = ERR_PTR(-ENOMEM);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto out_err;
	}
	iommu_group_put(group);

	/* It worked! Now, poke the actual hardware */
	for_each_cfg_sme(fwspec, i, idx) {
		arm_smmu_write_sme(smmu, idx);
		smmu->s2crs[idx].group = group;
	}

	mutex_unlock(&smmu->stream_map_mutex);
	return 0;

out_err:
	while (i--) {
		arm_smmu_free_sme(smmu, cfg->smendx[i]);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
	return ret;
}

static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
{
	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
	int i, idx;

	mutex_lock(&smmu->stream_map_mutex);
	for_each_cfg_sme(fwspec, i, idx) {
		if (arm_smmu_free_sme(smmu, idx))
			arm_smmu_write_sme(smmu, idx);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
}

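/* Point each of the master's stream map entries at the domain's context bank */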
Will Deacon45ae7cf2013-06-24 18:31:25 +01001082static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Robin Murphyadfec2e2016-09-12 17:13:55 +01001083 struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001084{
Will Deacon44680ee2014-06-25 11:29:12 +01001085 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001086 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001087 u8 cbndx = smmu_domain->cfg.cbndx;
Will Deacon61bc6712017-01-06 16:56:03 +00001088 enum arm_smmu_s2cr_type type;
Robin Murphy588888a2016-09-12 17:13:54 +01001089 int i, idx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001090
Will Deacon61bc6712017-01-06 16:56:03 +00001091 if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
1092 type = S2CR_TYPE_BYPASS;
1093 else
1094 type = S2CR_TYPE_TRANS;
1095
Robin Murphyadfec2e2016-09-12 17:13:55 +01001096 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy8e8b2032016-09-12 17:13:50 +01001097 if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
Robin Murphy588888a2016-09-12 17:13:54 +01001098 continue;
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001099
Robin Murphy8e8b2032016-09-12 17:13:50 +01001100 s2cr[idx].type = type;
Sricharan Re1989802017-01-06 18:58:15 +05301101 s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001102 s2cr[idx].cbndx = cbndx;
1103 arm_smmu_write_s2cr(smmu, idx);
Will Deacon43b412b2014-07-15 11:22:24 +01001104 }
Robin Murphy8e8b2032016-09-12 17:13:50 +01001105 return 0;
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001106}
1107
Will Deacon45ae7cf2013-06-24 18:31:25 +01001108static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1109{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001110 int ret;
Joerg Roedel9b468f72018-11-29 14:01:00 +01001111 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001112 struct arm_smmu_device *smmu;
Joerg Roedel1d672632015-03-26 13:43:10 +01001113 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001114
Robin Murphyadfec2e2016-09-12 17:13:55 +01001115 if (!fwspec || fwspec->ops != &arm_smmu_ops) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001116 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
1117 return -ENXIO;
1118 }
1119
Robin Murphyfba4f8e2016-10-17 12:06:21 +01001120 /*
1121 * FIXME: The arch/arm DMA API code tries to attach devices to its own
1122 * domains between of_xlate() and add_device() - we have no way to cope
1123 * with that, so until ARM gets converted to rely on groups and default
1124 * domains, just say no (but more politely than by dereferencing NULL).
1125 * This should be at least a WARN_ON once that's sorted.
1126 */
1127 if (!fwspec->iommu_priv)
1128 return -ENODEV;
1129
Robin Murphyadfec2e2016-09-12 17:13:55 +01001130 smmu = fwspec_smmu(fwspec);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301131
1132 ret = arm_smmu_rpm_get(smmu);
1133 if (ret < 0)
1134 return ret;
1135
Will Deacon518f7132014-11-14 17:17:54 +00001136 /* Ensure that the domain is finalised */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001137 ret = arm_smmu_init_domain_context(domain, smmu);
Arnd Bergmann287980e2016-05-27 23:23:25 +02001138 if (ret < 0)
Sricharan Rd4a44f02018-12-04 11:52:10 +05301139 goto rpm_put;
Will Deacon518f7132014-11-14 17:17:54 +00001140
Will Deacon45ae7cf2013-06-24 18:31:25 +01001141 /*
Will Deacon44680ee2014-06-25 11:29:12 +01001142 * Sanity check the domain. We don't support domains across
1143 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01001144 */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001145 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001146 dev_err(dev,
1147 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Robin Murphyadfec2e2016-09-12 17:13:55 +01001148 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
Sricharan Rd4a44f02018-12-04 11:52:10 +05301149 ret = -EINVAL;
1150 goto rpm_put;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001151 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001152
1153 /* Looks ok, so add the device to the domain */
Sricharan Rd4a44f02018-12-04 11:52:10 +05301154 ret = arm_smmu_domain_add_master(smmu_domain, fwspec);
1155
1156rpm_put:
1157 arm_smmu_rpm_put(smmu);
1158 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001159}
1160
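/*
 * Map and unmap simply drive the io-pgtable code for the domain; the SMMU
 * itself only needs powering up (arm_smmu_rpm_get/put) around the calls so
 * that any TLB maintenance issued along the way can reach the hardware.
 */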
Will Deacon45ae7cf2013-06-24 18:31:25 +01001161static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00001162 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001163{
Robin Murphy523d7422017-06-22 16:53:56 +01001164 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301165 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1166 int ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001167
Will Deacon518f7132014-11-14 17:17:54 +00001168 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001169 return -ENODEV;
1170
Sricharan Rd4a44f02018-12-04 11:52:10 +05301171 arm_smmu_rpm_get(smmu);
1172 ret = ops->map(ops, iova, paddr, size, prot);
1173 arm_smmu_rpm_put(smmu);
1174
1175 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001176}
1177
1178static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
Will Deacon56f8af52019-07-02 16:44:06 +01001179 size_t size, struct iommu_iotlb_gather *gather)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001180{
Robin Murphy523d7422017-06-22 16:53:56 +01001181 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301182 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1183 size_t ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001184
Will Deacon518f7132014-11-14 17:17:54 +00001185 if (!ops)
1186 return 0;
1187
Sricharan Rd4a44f02018-12-04 11:52:10 +05301188 arm_smmu_rpm_get(smmu);
Will Deacona2d3a382019-07-02 16:44:58 +01001189 ret = ops->unmap(ops, iova, size, gather);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301190 arm_smmu_rpm_put(smmu);
1191
1192 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001193}
1194
Robin Murphy44f68762018-09-20 17:10:27 +01001195static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
1196{
1197 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301198 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy44f68762018-09-20 17:10:27 +01001199
Will Deaconabfd6fe2019-07-02 16:44:41 +01001200 if (smmu_domain->flush_ops) {
Sricharan Rd4a44f02018-12-04 11:52:10 +05301201 arm_smmu_rpm_get(smmu);
Will Deaconabfd6fe2019-07-02 16:44:41 +01001202 smmu_domain->flush_ops->tlb.tlb_flush_all(smmu_domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301203 arm_smmu_rpm_put(smmu);
1204 }
Robin Murphy44f68762018-09-20 17:10:27 +01001205}
1206
Will Deacon56f8af52019-07-02 16:44:06 +01001207static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
1208 struct iommu_iotlb_gather *gather)
Robin Murphy32b12442017-09-28 15:55:01 +01001209{
1210 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301211 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy32b12442017-09-28 15:55:01 +01001212
Will Deaconabfd6fe2019-07-02 16:44:41 +01001213 if (smmu_domain->flush_ops) {
Sricharan Rd4a44f02018-12-04 11:52:10 +05301214 arm_smmu_rpm_get(smmu);
Will Deacone953f7f2019-07-02 16:44:50 +01001215 smmu_domain->flush_ops->tlb_sync(smmu_domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301216 arm_smmu_rpm_put(smmu);
1217 }
Robin Murphy32b12442017-09-28 15:55:01 +01001218}
1219
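/*
 * Translate an IOVA by asking the hardware directly: write the address to
 * the context bank's ATS1PR register, poll ATSR until the translation
 * completes, then read the result back from PAR. On timeout we fall back
 * to walking the page tables in software.
 */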
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001220static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
1221 dma_addr_t iova)
1222{
Joerg Roedel1d672632015-03-26 13:43:10 +01001223 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001224 struct arm_smmu_device *smmu = smmu_domain->smmu;
1225 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1226 struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
1227 struct device *dev = smmu->dev;
Robin Murphy19713fd2019-08-15 19:37:30 +01001228 void __iomem *reg;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001229 u32 tmp;
1230 u64 phys;
Robin Murphy523d7422017-06-22 16:53:56 +01001231 unsigned long va, flags;
Robin Murphy19713fd2019-08-15 19:37:30 +01001232 int ret, idx = cfg->cbndx;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301233
1234 ret = arm_smmu_rpm_get(smmu);
1235 if (ret < 0)
1236 return 0;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001237
Robin Murphy523d7422017-06-22 16:53:56 +01001238 spin_lock_irqsave(&smmu_domain->cb_lock, flags);
Robin Murphy661d9622015-05-27 17:09:34 +01001239 va = iova & ~0xfffUL;
Robin Murphy61005762019-08-15 19:37:28 +01001240 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
Robin Murphy19713fd2019-08-15 19:37:30 +01001241 arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
Robin Murphy61005762019-08-15 19:37:28 +01001242 else
Robin Murphy19713fd2019-08-15 19:37:30 +01001243 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001244
Robin Murphy19713fd2019-08-15 19:37:30 +01001245 reg = arm_smmu_page(smmu, ARM_SMMU_CB(smmu, idx)) + ARM_SMMU_CB_ATSR;
1246 if (readl_poll_timeout_atomic(reg, tmp, !(tmp & ATSR_ACTIVE), 5, 50)) {
Robin Murphy523d7422017-06-22 16:53:56 +01001247		spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001248		dev_err(dev,
Fabio Estevam077124c2015-08-18 17:12:24 +01001249			"iova to phys timed out on %pad. Falling back to software table walk.\n",
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001250			&iova);
		arm_smmu_rpm_put(smmu);
1251		return ops->iova_to_phys(ops, iova);
1252 }
1253
Robin Murphy19713fd2019-08-15 19:37:30 +01001254 phys = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_PAR);
Robin Murphy523d7422017-06-22 16:53:56 +01001255 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001256	if (phys & CB_PAR_F) {
1257		dev_err(dev, "translation fault!\n");
1258		dev_err(dev, "PAR = 0x%llx\n", phys);
		arm_smmu_rpm_put(smmu);
1259		return 0;
1260	}
1261
Sricharan Rd4a44f02018-12-04 11:52:10 +05301262 arm_smmu_rpm_put(smmu);
1263
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001264 return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
1265}
1266
Will Deacon45ae7cf2013-06-24 18:31:25 +01001267static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001268 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001269{
Joerg Roedel1d672632015-03-26 13:43:10 +01001270 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Robin Murphy523d7422017-06-22 16:53:56 +01001271 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001272
Sunil Gouthambdf95922017-04-25 15:27:52 +05301273 if (domain->type == IOMMU_DOMAIN_IDENTITY)
1274 return iova;
1275
Will Deacon518f7132014-11-14 17:17:54 +00001276 if (!ops)
Will Deacona44a97912013-11-07 18:47:50 +00001277 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001278
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001279 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
Robin Murphy523d7422017-06-22 16:53:56 +01001280 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
1281 return arm_smmu_iova_to_phys_hard(domain, iova);
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001282
Robin Murphy523d7422017-06-22 16:53:56 +01001283 return ops->iova_to_phys(ops, iova);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001284}
1285
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001286static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001287{
Will Deacond0948942014-06-24 17:30:10 +01001288 switch (cap) {
1289 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001290 /*
1291 * Return true here as the SMMU can always send out coherent
1292 * requests.
1293 */
1294 return true;
Antonios Motakis0029a8d2014-10-13 14:06:18 +01001295 case IOMMU_CAP_NOEXEC:
1296 return true;
Will Deacond0948942014-06-24 17:30:10 +01001297 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001298 return false;
Will Deacond0948942014-06-24 17:30:10 +01001299 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001300}
Will Deacon45ae7cf2013-06-24 18:31:25 +01001301
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001302static
1303struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
Robin Murphy021bb842016-09-14 15:26:46 +01001304{
Suzuki K Poulose67843bb2019-07-23 23:18:34 +01001305 struct device *dev = driver_find_device_by_fwnode(&arm_smmu_driver.driver,
1306 fwnode);
Robin Murphy021bb842016-09-14 15:26:46 +01001307 put_device(dev);
1308 return dev ? dev_get_drvdata(dev) : NULL;
1309}
1310
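/*
 * Hook up a new master: find the SMMU it sits behind (via the legacy
 * "mmu-masters" binding or the generic fwspec), check its stream IDs and
 * masks against what the hardware supports, then allocate the per-master
 * cfg and its stream mapping entries.
 */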
Will Deacon03edb222015-01-19 14:27:33 +00001311static int arm_smmu_add_device(struct device *dev)
1312{
Robin Murphyadfec2e2016-09-12 17:13:55 +01001313 struct arm_smmu_device *smmu;
Robin Murphyf80cd882016-09-14 15:21:39 +01001314 struct arm_smmu_master_cfg *cfg;
Joerg Roedel9b468f72018-11-29 14:01:00 +01001315 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphyf80cd882016-09-14 15:21:39 +01001316 int i, ret;
1317
Robin Murphy021bb842016-09-14 15:26:46 +01001318 if (using_legacy_binding) {
1319 ret = arm_smmu_register_legacy_master(dev, &smmu);
Artem Savkova7990c62017-08-08 12:26:02 +02001320
1321 /*
1322 * If dev->iommu_fwspec is initally NULL, arm_smmu_register_legacy_master()
1323 * will allocate/initialise a new one. Thus we need to update fwspec for
1324 * later use.
1325 */
Joerg Roedel9b468f72018-11-29 14:01:00 +01001326 fwspec = dev_iommu_fwspec_get(dev);
Robin Murphy021bb842016-09-14 15:26:46 +01001327 if (ret)
1328 goto out_free;
Robin Murphy3c117b52016-11-02 17:31:32 +00001329 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001330 smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
Robin Murphy021bb842016-09-14 15:26:46 +01001331 } else {
1332 return -ENODEV;
1333 }
Robin Murphyf80cd882016-09-14 15:21:39 +01001334
1335 ret = -EINVAL;
Robin Murphyadfec2e2016-09-12 17:13:55 +01001336 for (i = 0; i < fwspec->num_ids; i++) {
Robin Murphy0caf5f42019-08-15 19:37:23 +01001337 u16 sid = FIELD_GET(SMR_ID, fwspec->ids[i]);
1338 u16 mask = FIELD_GET(SMR_MASK, fwspec->ids[i]);
Robin Murphyf80cd882016-09-14 15:21:39 +01001339
Robin Murphyadfec2e2016-09-12 17:13:55 +01001340 if (sid & ~smmu->streamid_mask) {
Robin Murphyf80cd882016-09-14 15:21:39 +01001341 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
Robin Murphy021bb842016-09-14 15:26:46 +01001342 sid, smmu->streamid_mask);
1343 goto out_free;
1344 }
1345 if (mask & ~smmu->smr_mask_mask) {
1346 dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
Peng Fan6323f472017-04-21 17:03:36 +08001347 mask, smmu->smr_mask_mask);
Robin Murphyf80cd882016-09-14 15:21:39 +01001348 goto out_free;
1349 }
Robin Murphyf80cd882016-09-14 15:21:39 +01001350 }
Will Deacon03edb222015-01-19 14:27:33 +00001351
Robin Murphyadfec2e2016-09-12 17:13:55 +01001352 ret = -ENOMEM;
1353 cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
1354 GFP_KERNEL);
1355 if (!cfg)
1356 goto out_free;
1357
1358 cfg->smmu = smmu;
1359 fwspec->iommu_priv = cfg;
1360 while (i--)
1361 cfg->smendx[i] = INVALID_SMENDX;
1362
Sricharan Rd4a44f02018-12-04 11:52:10 +05301363 ret = arm_smmu_rpm_get(smmu);
1364 if (ret < 0)
1365 goto out_cfg_free;
1366
Robin Murphy588888a2016-09-12 17:13:54 +01001367 ret = arm_smmu_master_alloc_smes(dev);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301368 arm_smmu_rpm_put(smmu);
1369
Robin Murphyadfec2e2016-09-12 17:13:55 +01001370 if (ret)
Vivek Gautamc54451a2017-07-06 15:07:00 +05301371 goto out_cfg_free;
Robin Murphyadfec2e2016-09-12 17:13:55 +01001372
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001373 iommu_device_link(&smmu->iommu, dev);
1374
Sricharan R655e3642018-12-04 11:52:11 +05301375 device_link_add(dev, smmu->dev,
1376 DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER);
1377
Robin Murphyadfec2e2016-09-12 17:13:55 +01001378 return 0;
Robin Murphyf80cd882016-09-14 15:21:39 +01001379
Vivek Gautamc54451a2017-07-06 15:07:00 +05301380out_cfg_free:
1381 kfree(cfg);
Robin Murphyf80cd882016-09-14 15:21:39 +01001382out_free:
Robin Murphyadfec2e2016-09-12 17:13:55 +01001383 iommu_fwspec_free(dev);
Robin Murphyf80cd882016-09-14 15:21:39 +01001384 return ret;
Will Deacon03edb222015-01-19 14:27:33 +00001385}
1386
Will Deacon45ae7cf2013-06-24 18:31:25 +01001387static void arm_smmu_remove_device(struct device *dev)
1388{
Joerg Roedel9b468f72018-11-29 14:01:00 +01001389 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001390 struct arm_smmu_master_cfg *cfg;
1391 struct arm_smmu_device *smmu;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301392 int ret;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001393
Robin Murphyadfec2e2016-09-12 17:13:55 +01001394 if (!fwspec || fwspec->ops != &arm_smmu_ops)
Robin Murphyf80cd882016-09-14 15:21:39 +01001395 return;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001396
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001397 cfg = fwspec->iommu_priv;
1398 smmu = cfg->smmu;
1399
Sricharan Rd4a44f02018-12-04 11:52:10 +05301400 ret = arm_smmu_rpm_get(smmu);
1401 if (ret < 0)
1402 return;
1403
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001404 iommu_device_unlink(&smmu->iommu, dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001405 arm_smmu_master_free_smes(fwspec);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301406
1407 arm_smmu_rpm_put(smmu);
1408
Antonios Motakis5fc63a72013-10-18 16:08:29 +01001409 iommu_group_remove_device(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001410 kfree(fwspec->iommu_priv);
1411 iommu_fwspec_free(dev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001412}
1413
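/*
 * Masters that share any stream mapping entry cannot be isolated from one
 * another, so they must end up in the same IOMMU group; otherwise fall
 * back to the standard PCI/fsl-mc/platform group allocation.
 */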
Joerg Roedelaf659932015-10-21 23:51:41 +02001414static struct iommu_group *arm_smmu_device_group(struct device *dev)
1415{
Joerg Roedel9b468f72018-11-29 14:01:00 +01001416 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001417 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
Robin Murphy588888a2016-09-12 17:13:54 +01001418 struct iommu_group *group = NULL;
1419 int i, idx;
1420
Robin Murphyadfec2e2016-09-12 17:13:55 +01001421 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy588888a2016-09-12 17:13:54 +01001422 if (group && smmu->s2crs[idx].group &&
1423 group != smmu->s2crs[idx].group)
1424 return ERR_PTR(-EINVAL);
1425
1426 group = smmu->s2crs[idx].group;
1427 }
1428
1429 if (group)
Robin Murphye1b44cb2016-11-11 17:59:22 +00001430 return iommu_group_ref_get(group);
Joerg Roedelaf659932015-10-21 23:51:41 +02001431
1432 if (dev_is_pci(dev))
1433 group = pci_device_group(dev);
Nipun Guptaeab03e22018-09-10 19:19:18 +05301434 else if (dev_is_fsl_mc(dev))
1435 group = fsl_mc_device_group(dev);
Joerg Roedelaf659932015-10-21 23:51:41 +02001436 else
1437 group = generic_device_group(dev);
1438
Joerg Roedelaf659932015-10-21 23:51:41 +02001439 return group;
1440}
1441
Will Deaconc752ce42014-06-25 22:46:31 +01001442static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
1443 enum iommu_attr attr, void *data)
1444{
Joerg Roedel1d672632015-03-26 13:43:10 +01001445 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001446
Robin Murphy44f68762018-09-20 17:10:27 +01001447	switch (domain->type) {
1448 case IOMMU_DOMAIN_UNMANAGED:
1449 switch (attr) {
1450 case DOMAIN_ATTR_NESTING:
1451 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
1452 return 0;
1453 default:
1454 return -ENODEV;
1455 }
1456 break;
1457 case IOMMU_DOMAIN_DMA:
1458 switch (attr) {
1459 case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
1460 *(int *)data = smmu_domain->non_strict;
1461 return 0;
1462 default:
1463 return -ENODEV;
1464 }
1465 break;
Will Deaconc752ce42014-06-25 22:46:31 +01001466 default:
Robin Murphy44f68762018-09-20 17:10:27 +01001467 return -EINVAL;
Will Deaconc752ce42014-06-25 22:46:31 +01001468 }
1469}
1470
1471static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
1472 enum iommu_attr attr, void *data)
1473{
Will Deacon518f7132014-11-14 17:17:54 +00001474 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01001475 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001476
Will Deacon518f7132014-11-14 17:17:54 +00001477 mutex_lock(&smmu_domain->init_mutex);
1478
Robin Murphy44f68762018-09-20 17:10:27 +01001479	switch (domain->type) {
1480 case IOMMU_DOMAIN_UNMANAGED:
1481 switch (attr) {
1482 case DOMAIN_ATTR_NESTING:
1483 if (smmu_domain->smmu) {
1484 ret = -EPERM;
1485 goto out_unlock;
1486 }
1487
1488 if (*(int *)data)
1489 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1490 else
1491 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1492 break;
1493 default:
1494 ret = -ENODEV;
Will Deacon518f7132014-11-14 17:17:54 +00001495 }
Robin Murphy44f68762018-09-20 17:10:27 +01001496 break;
1497 case IOMMU_DOMAIN_DMA:
1498 switch (attr) {
1499 case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
1500 smmu_domain->non_strict = *(int *)data;
1501 break;
1502 default:
1503 ret = -ENODEV;
1504 }
Will Deacon518f7132014-11-14 17:17:54 +00001505 break;
Will Deaconc752ce42014-06-25 22:46:31 +01001506 default:
Robin Murphy44f68762018-09-20 17:10:27 +01001507 ret = -EINVAL;
Will Deaconc752ce42014-06-25 22:46:31 +01001508 }
Will Deacon518f7132014-11-14 17:17:54 +00001509out_unlock:
1510 mutex_unlock(&smmu_domain->init_mutex);
1511 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01001512}
1513
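/*
 * Translate firmware (DT) arguments into an SMR-style ID/mask pair. As an
 * illustrative example only, a generic two-cell binding such as
 *
 *	iommus = <&smmu 0x400 0x3f>;
 *
 * would yield stream ID 0x400 with mask 0x3f, while a single-cell binding
 * may pick up an optional "stream-match-mask" property from the SMMU node
 * instead.
 */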
Robin Murphy021bb842016-09-14 15:26:46 +01001514static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
1515{
Robin Murphy56fbf602017-03-31 12:03:33 +01001516 u32 mask, fwid = 0;
Robin Murphy021bb842016-09-14 15:26:46 +01001517
1518 if (args->args_count > 0)
Robin Murphy0caf5f42019-08-15 19:37:23 +01001519 fwid |= FIELD_PREP(SMR_ID, args->args[0]);
Robin Murphy021bb842016-09-14 15:26:46 +01001520
1521 if (args->args_count > 1)
Robin Murphy0caf5f42019-08-15 19:37:23 +01001522 fwid |= FIELD_PREP(SMR_MASK, args->args[1]);
Robin Murphy56fbf602017-03-31 12:03:33 +01001523 else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
Robin Murphy0caf5f42019-08-15 19:37:23 +01001524 fwid |= FIELD_PREP(SMR_MASK, mask);
Robin Murphy021bb842016-09-14 15:26:46 +01001525
1526 return iommu_fwspec_add_ids(dev, &fwid, 1);
1527}
1528
Eric Augerf3ebee82017-01-19 20:57:55 +00001529static void arm_smmu_get_resv_regions(struct device *dev,
1530 struct list_head *head)
1531{
1532 struct iommu_resv_region *region;
1533 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
1534
1535 region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
Robin Murphy9d3a4de2017-03-16 17:00:16 +00001536 prot, IOMMU_RESV_SW_MSI);
Eric Augerf3ebee82017-01-19 20:57:55 +00001537 if (!region)
1538 return;
1539
1540 list_add_tail(&region->list, head);
Robin Murphy273df962017-03-16 17:00:19 +00001541
1542 iommu_dma_get_resv_regions(dev, head);
Eric Augerf3ebee82017-01-19 20:57:55 +00001543}
1544
1545static void arm_smmu_put_resv_regions(struct device *dev,
1546 struct list_head *head)
1547{
1548 struct iommu_resv_region *entry, *next;
1549
1550 list_for_each_entry_safe(entry, next, head, list)
1551 kfree(entry);
1552}
1553
Will Deacon518f7132014-11-14 17:17:54 +00001554static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01001555 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01001556 .domain_alloc = arm_smmu_domain_alloc,
1557 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01001558 .attach_dev = arm_smmu_attach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01001559 .map = arm_smmu_map,
1560 .unmap = arm_smmu_unmap,
Robin Murphy44f68762018-09-20 17:10:27 +01001561 .flush_iotlb_all = arm_smmu_flush_iotlb_all,
Robin Murphy32b12442017-09-28 15:55:01 +01001562 .iotlb_sync = arm_smmu_iotlb_sync,
Will Deaconc752ce42014-06-25 22:46:31 +01001563 .iova_to_phys = arm_smmu_iova_to_phys,
1564 .add_device = arm_smmu_add_device,
1565 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02001566 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01001567 .domain_get_attr = arm_smmu_domain_get_attr,
1568 .domain_set_attr = arm_smmu_domain_set_attr,
Robin Murphy021bb842016-09-14 15:26:46 +01001569 .of_xlate = arm_smmu_of_xlate,
Eric Augerf3ebee82017-01-19 20:57:55 +00001570 .get_resv_regions = arm_smmu_get_resv_regions,
1571 .put_resv_regions = arm_smmu_put_resv_regions,
Will Deacon518f7132014-11-14 17:17:54 +00001572 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001573};
1574
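/*
 * Bring the SMMU to a known state: clear the global fault status, reset
 * every stream mapping entry and context bank, invalidate the TLBs, and
 * finally program sCR0 with fault reporting enabled and unmatched streams
 * handled according to the disable_bypass policy.
 */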
1575static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
1576{
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001577 int i;
Robin Murphy62b993a2019-08-15 19:37:36 +01001578 u32 reg;
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001579
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001580 /* clear global FSR */
Robin Murphy00320ce2019-08-15 19:37:31 +01001581 reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
1582 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, reg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001583
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001584 /*
1585 * Reset stream mapping groups: Initial values mark all SMRn as
1586 * invalid and all S2CRn as bypass unless overridden.
1587 */
Robin Murphy8e8b2032016-09-12 17:13:50 +01001588 for (i = 0; i < smmu->num_mapping_groups; ++i)
1589 arm_smmu_write_sme(smmu, i);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001590
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001591 /* Make sure all context banks are disabled and clear CB_FSR */
1592 for (i = 0; i < smmu->num_context_banks; ++i) {
Robin Murphy90df3732017-08-08 14:56:14 +01001593 arm_smmu_write_context_bank(smmu, i);
Robin Murphy19713fd2019-08-15 19:37:30 +01001594 arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_FSR, FSR_FAULT);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001595 }
Will Deacon1463fe42013-07-31 19:21:27 +01001596
Will Deacon45ae7cf2013-06-24 18:31:25 +01001597 /* Invalidate the TLB, just in case */
Robin Murphy00320ce2019-08-15 19:37:31 +01001598 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLH, QCOM_DUMMY_VAL);
1599 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLNSNH, QCOM_DUMMY_VAL);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001600
Robin Murphy00320ce2019-08-15 19:37:31 +01001601 reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001602
Will Deacon45ae7cf2013-06-24 18:31:25 +01001603 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001604 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001605
1606 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001607 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001608
Robin Murphy25a1c962016-02-10 14:25:33 +00001609 /* Enable client access, handling unmatched streams as appropriate */
1610 reg &= ~sCR0_CLIENTPD;
1611 if (disable_bypass)
1612 reg |= sCR0_USFCFG;
1613 else
1614 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001615
1616 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001617 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001618
1619 /* Don't upgrade barriers */
Robin Murphy0caf5f42019-08-15 19:37:23 +01001620 reg &= ~(sCR0_BSU);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001621
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001622 if (smmu->features & ARM_SMMU_FEAT_VMID16)
1623 reg |= sCR0_VMID16EN;
1624
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001625 if (smmu->features & ARM_SMMU_FEAT_EXIDS)
1626 reg |= sCR0_EXIDENABLE;
1627
Robin Murphy62b993a2019-08-15 19:37:36 +01001628 if (smmu->impl && smmu->impl->reset)
1629 smmu->impl->reset(smmu);
1630
Will Deacon45ae7cf2013-06-24 18:31:25 +01001631 /* Push the button */
Robin Murphy11febfc2017-03-30 17:56:31 +01001632 arm_smmu_tlb_sync_global(smmu);
Robin Murphy00320ce2019-08-15 19:37:31 +01001633 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, reg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001634}
1635
1636static int arm_smmu_id_size_to_bits(int size)
1637{
1638 switch (size) {
1639 case 0:
1640 return 32;
1641 case 1:
1642 return 36;
1643 case 2:
1644 return 40;
1645 case 3:
1646 return 42;
1647 case 4:
1648 return 44;
1649 case 5:
1650 default:
1651 return 48;
1652 }
1653}
1654
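/*
 * Probe the ID registers to discover what this implementation supports:
 * translation stages, stream matching resources, context bank counts,
 * address sizes and page-table formats. The results are cached in the
 * arm_smmu_device for the rest of the driver.
 */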
1655static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1656{
Robin Murphy490325e2019-08-15 19:37:26 +01001657 unsigned int size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001658 u32 id;
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001659 bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001660 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001661
1662 dev_notice(smmu->dev, "probing hardware configuration...\n");
Robin Murphyb7862e32016-04-13 18:13:03 +01001663 dev_notice(smmu->dev, "SMMUv%d with:\n",
1664 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001665
1666 /* ID0 */
Robin Murphy00320ce2019-08-15 19:37:31 +01001667 id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01001668
1669 /* Restrict available stages based on module parameter */
1670 if (force_stage == 1)
1671 id &= ~(ID0_S2TS | ID0_NTS);
1672 else if (force_stage == 2)
1673 id &= ~(ID0_S1TS | ID0_NTS);
1674
Will Deacon45ae7cf2013-06-24 18:31:25 +01001675 if (id & ID0_S1TS) {
1676 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
1677 dev_notice(smmu->dev, "\tstage 1 translation\n");
1678 }
1679
1680 if (id & ID0_S2TS) {
1681 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
1682 dev_notice(smmu->dev, "\tstage 2 translation\n");
1683 }
1684
1685 if (id & ID0_NTS) {
1686 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
1687 dev_notice(smmu->dev, "\tnested translation\n");
1688 }
1689
1690 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01001691 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001692 dev_err(smmu->dev, "\tno translation support!\n");
1693 return -ENODEV;
1694 }
1695
Robin Murphyb7862e32016-04-13 18:13:03 +01001696 if ((id & ID0_S1TS) &&
1697 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001698 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
1699 dev_notice(smmu->dev, "\taddress translation ops\n");
1700 }
1701
Robin Murphybae2c2d2015-07-29 19:46:05 +01001702 /*
1703 * In order for DMA API calls to work properly, we must defer to what
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001704 * the FW says about coherency, regardless of what the hardware claims.
Robin Murphybae2c2d2015-07-29 19:46:05 +01001705 * Fortunately, this also opens up a workaround for systems where the
1706 * ID register value has ended up configured incorrectly.
1707 */
Robin Murphybae2c2d2015-07-29 19:46:05 +01001708 cttw_reg = !!(id & ID0_CTTW);
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001709 if (cttw_fw || cttw_reg)
Robin Murphybae2c2d2015-07-29 19:46:05 +01001710 dev_notice(smmu->dev, "\t%scoherent table walk\n",
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001711 cttw_fw ? "" : "non-");
1712 if (cttw_fw != cttw_reg)
Robin Murphybae2c2d2015-07-29 19:46:05 +01001713 dev_notice(smmu->dev,
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001714 "\t(IDR0.CTTW overridden by FW configuration)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01001715
Robin Murphy21174242016-09-12 17:13:48 +01001716 /* Max. number of entries we have for stream matching/indexing */
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001717 if (smmu->version == ARM_SMMU_V2 && id & ID0_EXIDS) {
1718 smmu->features |= ARM_SMMU_FEAT_EXIDS;
1719 size = 1 << 16;
1720 } else {
Robin Murphy0caf5f42019-08-15 19:37:23 +01001721 size = 1 << FIELD_GET(ID0_NUMSIDB, id);
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001722 }
Robin Murphy21174242016-09-12 17:13:48 +01001723 smmu->streamid_mask = size - 1;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001724 if (id & ID0_SMS) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001725 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
Robin Murphy0caf5f42019-08-15 19:37:23 +01001726 size = FIELD_GET(ID0_NUMSMRG, id);
Robin Murphy21174242016-09-12 17:13:48 +01001727 if (size == 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001728 dev_err(smmu->dev,
1729 "stream-matching supported, but no SMRs present!\n");
1730 return -ENODEV;
1731 }
1732
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001733 /* Zero-initialised to mark as invalid */
1734 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
1735 GFP_KERNEL);
1736 if (!smmu->smrs)
1737 return -ENOMEM;
1738
Will Deacon45ae7cf2013-06-24 18:31:25 +01001739 dev_notice(smmu->dev,
Robin Murphy490325e2019-08-15 19:37:26 +01001740			   "\tstream matching with %u register groups\n", size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001741 }
Robin Murphy8e8b2032016-09-12 17:13:50 +01001742 /* s2cr->type == 0 means translation, so initialise explicitly */
1743 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
1744 GFP_KERNEL);
1745 if (!smmu->s2crs)
1746 return -ENOMEM;
1747 for (i = 0; i < size; i++)
1748 smmu->s2crs[i] = s2cr_init_val;
1749
Robin Murphy21174242016-09-12 17:13:48 +01001750 smmu->num_mapping_groups = size;
Robin Murphy588888a2016-09-12 17:13:54 +01001751 mutex_init(&smmu->stream_map_mutex);
Will Deacon8e517e72017-07-06 15:55:48 +01001752 spin_lock_init(&smmu->global_sync_lock);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001753
Robin Murphy7602b872016-04-28 17:12:09 +01001754 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
1755 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
1756 if (!(id & ID0_PTFS_NO_AARCH32S))
1757 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
1758 }
1759
Will Deacon45ae7cf2013-06-24 18:31:25 +01001760 /* ID1 */
Robin Murphy00320ce2019-08-15 19:37:31 +01001761 id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01001762 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001763
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01001764 /* Check for size mismatch of SMMU address space from mapped region */
Robin Murphy0caf5f42019-08-15 19:37:23 +01001765 size = 1 << (FIELD_GET(ID1_NUMPAGENDXB, id) + 1);
Robin Murphy490325e2019-08-15 19:37:26 +01001766 if (smmu->numpage != 2 * size << smmu->pgshift)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001767 dev_warn(smmu->dev,
Robin Murphy490325e2019-08-15 19:37:26 +01001768 "SMMU address space size (0x%x) differs from mapped region size (0x%x)!\n",
1769 2 * size << smmu->pgshift, smmu->numpage);
1770 /* Now properly encode NUMPAGE to subsequently derive SMMU_CB_BASE */
1771 smmu->numpage = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001772
Robin Murphy0caf5f42019-08-15 19:37:23 +01001773 smmu->num_s2_context_banks = FIELD_GET(ID1_NUMS2CB, id);
1774 smmu->num_context_banks = FIELD_GET(ID1_NUMCB, id);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001775 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
1776 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
1777 return -ENODEV;
1778 }
1779 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
1780 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphy90df3732017-08-08 14:56:14 +01001781 smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks,
1782 sizeof(*smmu->cbs), GFP_KERNEL);
1783 if (!smmu->cbs)
1784 return -ENOMEM;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001785
1786 /* ID2 */
Robin Murphy00320ce2019-08-15 19:37:31 +01001787 id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID2);
Robin Murphy0caf5f42019-08-15 19:37:23 +01001788 size = arm_smmu_id_size_to_bits(FIELD_GET(ID2_IAS, id));
Will Deacon518f7132014-11-14 17:17:54 +00001789 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001790
Will Deacon518f7132014-11-14 17:17:54 +00001791 /* The output mask is also applied for bypass */
Robin Murphy0caf5f42019-08-15 19:37:23 +01001792 size = arm_smmu_id_size_to_bits(FIELD_GET(ID2_OAS, id));
Will Deacon518f7132014-11-14 17:17:54 +00001793 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001794
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001795 if (id & ID2_VMID16)
1796 smmu->features |= ARM_SMMU_FEAT_VMID16;
1797
Robin Murphyf1d84542015-03-04 16:41:05 +00001798 /*
1799 * What the page table walker can address actually depends on which
1800 * descriptor format is in use, but since a) we don't know that yet,
1801 * and b) it can vary per context bank, this will have to do...
1802 */
1803 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
1804 dev_warn(smmu->dev,
1805 "failed to set DMA mask for table walker\n");
1806
Robin Murphyb7862e32016-04-13 18:13:03 +01001807 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00001808 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01001809 if (smmu->version == ARM_SMMU_V1_64K)
1810 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001811 } else {
Robin Murphy0caf5f42019-08-15 19:37:23 +01001812 size = FIELD_GET(ID2_UBS, id);
Will Deacon518f7132014-11-14 17:17:54 +00001813 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00001814 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01001815 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00001816 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01001817 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00001818 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01001819 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001820 }
1821
Robin Murphy7602b872016-04-28 17:12:09 +01001822 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01001823 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01001824 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01001825 if (smmu->features &
1826 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01001827 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01001828 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01001829 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01001830 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01001831 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01001832
Robin Murphyd5466352016-05-09 17:20:09 +01001833 if (arm_smmu_ops.pgsize_bitmap == -1UL)
1834 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
1835 else
1836 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
1837 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
1838 smmu->pgsize_bitmap);
1839
Will Deacon28d60072014-09-01 16:24:48 +01001841 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
1842 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
Will Deacon518f7132014-11-14 17:17:54 +00001843 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01001844
1845 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
1846 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
Will Deacon518f7132014-11-14 17:17:54 +00001847 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01001848
Robin Murphy3995e182019-08-15 19:37:35 +01001849 if (smmu->impl && smmu->impl->cfg_probe)
1850 return smmu->impl->cfg_probe(smmu);
1851
Will Deacon45ae7cf2013-06-24 18:31:25 +01001852 return 0;
1853}
1854
Robin Murphy67b65a32016-04-13 18:12:57 +01001855struct arm_smmu_match_data {
1856 enum arm_smmu_arch_version version;
1857 enum arm_smmu_implementation model;
1858};
1859
1860#define ARM_SMMU_MATCH_DATA(name, ver, imp) \
Sricharan R96a299d2018-12-04 11:52:09 +05301861static const struct arm_smmu_match_data name = { .version = ver, .model = imp }
Robin Murphy67b65a32016-04-13 18:12:57 +01001862
1863ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
1864ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
Robin Murphyb7862e32016-04-13 18:13:03 +01001865ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01001866ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
Robin Murphye086d912016-04-13 18:12:58 +01001867ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
Vivek Gautam89cddc52018-12-04 11:52:13 +05301868ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2);
Robin Murphy67b65a32016-04-13 18:12:57 +01001869
Joerg Roedel09b52692014-10-02 12:24:45 +02001870static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01001871 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
1872 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
1873 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01001874 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01001875 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01001876 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Vivek Gautam89cddc52018-12-04 11:52:13 +05301877 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
Robin Murphy09360402014-08-28 17:51:59 +01001878 { },
1879};
Robin Murphy09360402014-08-28 17:51:59 +01001880
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00001881#ifdef CONFIG_ACPI
1882static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
1883{
1884 int ret = 0;
1885
1886 switch (model) {
1887 case ACPI_IORT_SMMU_V1:
1888 case ACPI_IORT_SMMU_CORELINK_MMU400:
1889 smmu->version = ARM_SMMU_V1;
1890 smmu->model = GENERIC_SMMU;
1891 break;
Robin Murphy84c24372017-06-19 16:41:56 +01001892 case ACPI_IORT_SMMU_CORELINK_MMU401:
1893 smmu->version = ARM_SMMU_V1_64K;
1894 smmu->model = GENERIC_SMMU;
1895 break;
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00001896 case ACPI_IORT_SMMU_V2:
1897 smmu->version = ARM_SMMU_V2;
1898 smmu->model = GENERIC_SMMU;
1899 break;
1900 case ACPI_IORT_SMMU_CORELINK_MMU500:
1901 smmu->version = ARM_SMMU_V2;
1902 smmu->model = ARM_MMU500;
1903 break;
Robin Murphy84c24372017-06-19 16:41:56 +01001904 case ACPI_IORT_SMMU_CAVIUM_THUNDERX:
1905 smmu->version = ARM_SMMU_V2;
1906 smmu->model = CAVIUM_SMMUV2;
1907 break;
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00001908 default:
1909 ret = -ENODEV;
1910 }
1911
1912 return ret;
1913}
1914
1915static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
1916 struct arm_smmu_device *smmu)
1917{
1918 struct device *dev = smmu->dev;
1919 struct acpi_iort_node *node =
1920 *(struct acpi_iort_node **)dev_get_platdata(dev);
1921 struct acpi_iort_smmu *iort_smmu;
1922 int ret;
1923
1924 /* Retrieve SMMU1/2 specific data */
1925 iort_smmu = (struct acpi_iort_smmu *)node->node_data;
1926
1927 ret = acpi_smmu_get_data(iort_smmu->model, smmu);
1928 if (ret < 0)
1929 return ret;
1930
1931 /* Ignore the configuration access interrupt */
1932 smmu->num_global_irqs = 1;
1933
1934 if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
1935 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
1936
1937 return 0;
1938}
1939#else
1940static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
1941 struct arm_smmu_device *smmu)
1942{
1943 return -ENODEV;
1944}
1945#endif
1946
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001947static int arm_smmu_device_dt_probe(struct platform_device *pdev,
1948 struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001949{
Robin Murphy67b65a32016-04-13 18:12:57 +01001950 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001951 struct device *dev = &pdev->dev;
Robin Murphy021bb842016-09-14 15:26:46 +01001952 bool legacy_binding;
1953
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001954 if (of_property_read_u32(dev->of_node, "#global-interrupts",
1955 &smmu->num_global_irqs)) {
1956 dev_err(dev, "missing #global-interrupts property\n");
1957 return -ENODEV;
1958 }
1959
1960 data = of_device_get_match_data(dev);
1961 smmu->version = data->version;
1962 smmu->model = data->model;
1963
Robin Murphy021bb842016-09-14 15:26:46 +01001964 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
1965 if (legacy_binding && !using_generic_binding) {
1966 if (!using_legacy_binding)
1967 pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
1968 using_legacy_binding = true;
1969 } else if (!legacy_binding && !using_legacy_binding) {
1970 using_generic_binding = true;
1971 } else {
1972 dev_err(dev, "not probing due to mismatched DT properties\n");
1973 return -ENODEV;
1974 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001975
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001976 if (of_dma_is_coherent(dev->of_node))
1977 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
1978
1979 return 0;
1980}
1981
Robin Murphyf6810c12017-04-10 16:51:05 +05301982static void arm_smmu_bus_init(void)
1983{
1984 /* Oh, for a proper bus abstraction */
1985 if (!iommu_present(&platform_bus_type))
1986 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
1987#ifdef CONFIG_ARM_AMBA
1988 if (!iommu_present(&amba_bustype))
1989 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
1990#endif
1991#ifdef CONFIG_PCI
1992 if (!iommu_present(&pci_bus_type)) {
1993 pci_request_acs();
1994 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
1995 }
1996#endif
Nipun Guptaeab03e22018-09-10 19:19:18 +05301997#ifdef CONFIG_FSL_MC_BUS
1998 if (!iommu_present(&fsl_mc_bus_type))
1999 bus_set_iommu(&fsl_mc_bus_type, &arm_smmu_ops);
2000#endif
Robin Murphyf6810c12017-04-10 16:51:05 +05302001}
2002
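/*
 * Platform probe: gather configuration from DT or ACPI/IORT, map the
 * register space, collect clocks and IRQs, discover hardware features,
 * register with the IOMMU core, reset the device and, for the generic
 * bindings, install the bus ops so that masters can find us immediately.
 */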
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002003static int arm_smmu_device_probe(struct platform_device *pdev)
2004{
2005 struct resource *res;
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002006 resource_size_t ioaddr;
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002007 struct arm_smmu_device *smmu;
2008 struct device *dev = &pdev->dev;
2009 int num_irqs, i, err;
2010
Will Deacon45ae7cf2013-06-24 18:31:25 +01002011 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
2012 if (!smmu) {
2013 dev_err(dev, "failed to allocate arm_smmu_device\n");
2014 return -ENOMEM;
2015 }
2016 smmu->dev = dev;
2017
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002018 if (dev->of_node)
2019 err = arm_smmu_device_dt_probe(pdev, smmu);
2020 else
2021 err = arm_smmu_device_acpi_probe(pdev, smmu);
2022
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002023 if (err)
2024 return err;
Robin Murphy09360402014-08-28 17:51:59 +01002025
Robin Murphyfc058d32019-08-15 19:37:33 +01002026 smmu = arm_smmu_impl_init(smmu);
2027 if (IS_ERR(smmu))
2028 return PTR_ERR(smmu);
2029
Will Deacon45ae7cf2013-06-24 18:31:25 +01002030 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002031 ioaddr = res->start;
Julia Lawall8a7f4312013-08-19 12:20:37 +01002032 smmu->base = devm_ioremap_resource(dev, res);
2033 if (IS_ERR(smmu->base))
2034 return PTR_ERR(smmu->base);
Robin Murphy490325e2019-08-15 19:37:26 +01002035 /*
2036 * The resource size should effectively match the value of SMMU_TOP;
2037 * stash that temporarily until we know PAGESIZE to validate it with.
2038 */
2039 smmu->numpage = resource_size(res);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002040
Will Deacon45ae7cf2013-06-24 18:31:25 +01002041 num_irqs = 0;
2042 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
2043 num_irqs++;
2044 if (num_irqs > smmu->num_global_irqs)
2045 smmu->num_context_irqs++;
2046 }
2047
Andreas Herrmann44a08de2013-10-01 13:39:07 +01002048 if (!smmu->num_context_irqs) {
2049 dev_err(dev, "found %d interrupts but expected at least %d\n",
2050 num_irqs, smmu->num_global_irqs + 1);
2051 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002052 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002053
Kees Cooka86854d2018-06-12 14:07:58 -07002054 smmu->irqs = devm_kcalloc(dev, num_irqs, sizeof(*smmu->irqs),
Will Deacon45ae7cf2013-06-24 18:31:25 +01002055 GFP_KERNEL);
2056 if (!smmu->irqs) {
2057 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
2058 return -ENOMEM;
2059 }
2060
2061 for (i = 0; i < num_irqs; ++i) {
2062 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07002063
Will Deacon45ae7cf2013-06-24 18:31:25 +01002064 if (irq < 0) {
2065 dev_err(dev, "failed to get irq index %d\n", i);
2066 return -ENODEV;
2067 }
2068 smmu->irqs[i] = irq;
2069 }
2070
Sricharan R96a299d2018-12-04 11:52:09 +05302071 err = devm_clk_bulk_get_all(dev, &smmu->clks);
2072 if (err < 0) {
2073 dev_err(dev, "failed to get clocks %d\n", err);
2074 return err;
2075 }
2076 smmu->num_clks = err;
2077
2078 err = clk_bulk_prepare_enable(smmu->num_clks, smmu->clks);
2079 if (err)
2080 return err;
2081
Olav Haugan3c8766d2014-08-22 17:12:32 -07002082 err = arm_smmu_device_cfg_probe(smmu);
2083 if (err)
2084 return err;
2085
Vivek Gautamd1e20222018-07-19 23:23:56 +05302086 if (smmu->version == ARM_SMMU_V2) {
2087 if (smmu->num_context_banks > smmu->num_context_irqs) {
2088 dev_err(dev,
2089 "found only %d context irq(s) but %d required\n",
2090 smmu->num_context_irqs, smmu->num_context_banks);
2091 return -ENODEV;
2092 }
2093
2094 /* Ignore superfluous interrupts */
2095 smmu->num_context_irqs = smmu->num_context_banks;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002096 }
2097
Will Deacon45ae7cf2013-06-24 18:31:25 +01002098 for (i = 0; i < smmu->num_global_irqs; ++i) {
Peng Fanbee14002016-07-04 17:38:22 +08002099 err = devm_request_irq(smmu->dev, smmu->irqs[i],
2100 arm_smmu_global_fault,
2101 IRQF_SHARED,
2102 "arm-smmu global fault",
2103 smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002104 if (err) {
2105 dev_err(dev, "failed to request global IRQ %d (%u)\n",
2106 i, smmu->irqs[i]);
Robin Murphyf80cd882016-09-14 15:21:39 +01002107 return err;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002108 }
2109 }
2110
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002111 err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
2112 "smmu.%pa", &ioaddr);
2113 if (err) {
2114 dev_err(dev, "Failed to register iommu in sysfs\n");
2115 return err;
2116 }
2117
2118 iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
2119 iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
2120
2121 err = iommu_device_register(&smmu->iommu);
2122 if (err) {
2123 dev_err(dev, "Failed to register iommu\n");
2124 return err;
2125 }
2126
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002127 platform_set_drvdata(pdev, smmu);
Will Deaconfd90cec2013-08-21 13:56:34 +01002128 arm_smmu_device_reset(smmu);
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03002129 arm_smmu_test_smr_masks(smmu);
Robin Murphy021bb842016-09-14 15:26:46 +01002130
Robin Murphyf6810c12017-04-10 16:51:05 +05302131 /*
Sricharan Rd4a44f02018-12-04 11:52:10 +05302132 * We want to avoid touching dev->power.lock in fastpaths unless
2133 * it's really going to do something useful - pm_runtime_enabled()
2134 * can serve as an ideal proxy for that decision. So, conditionally
2135 * enable pm_runtime.
2136 */
2137 if (dev->pm_domain) {
2138 pm_runtime_set_active(dev);
2139 pm_runtime_enable(dev);
2140 }
2141
2142 /*
Robin Murphyf6810c12017-04-10 16:51:05 +05302143 * For ACPI and generic DT bindings, an SMMU will be probed before
2144 * any device which might need it, so we want the bus ops in place
2145 * ready to handle default domain setup as soon as any SMMU exists.
2146 */
2147 if (!using_legacy_binding)
2148 arm_smmu_bus_init();
2149
Will Deacon45ae7cf2013-06-24 18:31:25 +01002150 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002151}
2152
Robin Murphyf6810c12017-04-10 16:51:05 +05302153/*
2154 * With the legacy DT binding in play, though, we have no guarantees about
2155 * probe order, but then we're also not doing default domains, so we can
2156 * delay setting bus ops until we're sure every possible SMMU is ready,
2157 * and that way ensure that no add_device() calls get missed.
2158 */
2159static int arm_smmu_legacy_bus_init(void)
2160{
2161 if (using_legacy_binding)
2162 arm_smmu_bus_init();
2163 return 0;
2164}
2165device_initcall_sync(arm_smmu_legacy_bus_init);
2166
Paul Gortmakeraddb672f2018-12-01 14:19:16 -05002167static void arm_smmu_device_shutdown(struct platform_device *pdev)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002168{
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002169 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002170
2171 if (!smmu)
Paul Gortmakeraddb672f2018-12-01 14:19:16 -05002172 return;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002173
Will Deaconecfadb62013-07-31 19:21:28 +01002174 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002175 dev_err(&pdev->dev, "removing device with active domains!\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01002176
Sricharan Rd4a44f02018-12-04 11:52:10 +05302177 arm_smmu_rpm_get(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002178 /* Turn the thing off */
Robin Murphy00320ce2019-08-15 19:37:31 +01002179 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, sCR0_CLIENTPD);
Sricharan Rd4a44f02018-12-04 11:52:10 +05302180 arm_smmu_rpm_put(smmu);
Sricharan R96a299d2018-12-04 11:52:09 +05302181
Sricharan Rd4a44f02018-12-04 11:52:10 +05302182 if (pm_runtime_enabled(smmu->dev))
2183 pm_runtime_force_suspend(smmu->dev);
2184 else
2185 clk_bulk_disable(smmu->num_clks, smmu->clks);
2186
2187 clk_bulk_unprepare(smmu->num_clks, smmu->clks);
Nate Watterson7aa86192017-06-29 18:18:15 -04002188}
2189
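/*
 * Runtime PM: the SMMU may lose state while powered down, so resume not
 * only re-enables the clocks but also replays the full reset sequence.
 */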
Sricharan R96a299d2018-12-04 11:52:09 +05302190static int __maybe_unused arm_smmu_runtime_resume(struct device *dev)
Robin Murphya2d866f2017-08-08 14:56:15 +01002191{
2192 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
Sricharan R96a299d2018-12-04 11:52:09 +05302193 int ret;
2194
2195 ret = clk_bulk_enable(smmu->num_clks, smmu->clks);
2196 if (ret)
2197 return ret;
Robin Murphya2d866f2017-08-08 14:56:15 +01002198
2199 arm_smmu_device_reset(smmu);
Sricharan R96a299d2018-12-04 11:52:09 +05302200
Will Deacon45ae7cf2013-06-24 18:31:25 +01002201 return 0;
2202}
2203
Sricharan R96a299d2018-12-04 11:52:09 +05302204static int __maybe_unused arm_smmu_runtime_suspend(struct device *dev)
Dan Carpenter6614ee72013-08-21 09:34:20 +01002205{
Sricharan R96a299d2018-12-04 11:52:09 +05302206 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
2207
2208 clk_bulk_disable(smmu->num_clks, smmu->clks);
2209
2210 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002211}
2212
Robin Murphya2d866f2017-08-08 14:56:15 +01002213static int __maybe_unused arm_smmu_pm_resume(struct device *dev)
2214{
Sricharan R96a299d2018-12-04 11:52:09 +05302215 if (pm_runtime_suspended(dev))
2216 return 0;
Robin Murphya2d866f2017-08-08 14:56:15 +01002217
Sricharan R96a299d2018-12-04 11:52:09 +05302218 return arm_smmu_runtime_resume(dev);
Robin Murphya2d866f2017-08-08 14:56:15 +01002219}
2220
Sricharan R96a299d2018-12-04 11:52:09 +05302221static int __maybe_unused arm_smmu_pm_suspend(struct device *dev)
2222{
2223 if (pm_runtime_suspended(dev))
2224 return 0;
2225
2226 return arm_smmu_runtime_suspend(dev);
2227}
2228
2229static const struct dev_pm_ops arm_smmu_pm_ops = {
2230 SET_SYSTEM_SLEEP_PM_OPS(arm_smmu_pm_suspend, arm_smmu_pm_resume)
2231 SET_RUNTIME_PM_OPS(arm_smmu_runtime_suspend,
2232 arm_smmu_runtime_resume, NULL)
2233};
Robin Murphya2d866f2017-08-08 14:56:15 +01002234
Will Deacon45ae7cf2013-06-24 18:31:25 +01002235static struct platform_driver arm_smmu_driver = {
2236 .driver = {
Paul Gortmakeraddb672f2018-12-01 14:19:16 -05002237 .name = "arm-smmu",
2238 .of_match_table = of_match_ptr(arm_smmu_of_match),
2239 .pm = &arm_smmu_pm_ops,
2240 .suppress_bind_attrs = true,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002241 },
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002242 .probe = arm_smmu_device_probe,
Nate Watterson7aa86192017-06-29 18:18:15 -04002243 .shutdown = arm_smmu_device_shutdown,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002244};
Paul Gortmakeraddb672f2018-12-01 14:19:16 -05002245builtin_platform_driver(arm_smmu_driver);