// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 *	- Extended Stream ID (16 bit)
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include <linux/amba/bus.h>
#include <linux/fsl/mc.h>

#include "arm-smmu.h"

/*
 * Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU
 * global register space are still, in fact, using a hypervisor to mediate it
 * by trapping and emulating register accesses. Sadly, some deployed versions
 * of said trapping code have bugs wherein they go horribly wrong for stores
 * using r31 (i.e. XZR/WZR) as the source register.
 */
#define QCOM_DUMMY_VAL -1

#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */
#define TLB_SPIN_COUNT			10

#define MSI_IOVA_BASE			0x8000000
#define MSI_IOVA_LENGTH			0x100000

static int force_stage;
/*
 * not really modular, but the easiest way to keep compat with existing
 * bootargs behaviour is to continue using module_param() here.
 */
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass =
	IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT);
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

struct arm_smmu_s2cr {
	struct iommu_group		*group;
	int				count;
	enum arm_smmu_s2cr_type		type;
	enum arm_smmu_s2cr_privcfg	privcfg;
	u8				cbndx;
};

#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
}

struct arm_smmu_smr {
	u16				mask;
	u16				id;
	bool				valid;
};

struct arm_smmu_cb {
	u64				ttbr[2];
	u32				tcr[2];
	u32				mair[2];
	struct arm_smmu_cfg		*cfg;
};

struct arm_smmu_master_cfg {
	struct arm_smmu_device		*smmu;
	s16				smendx[];
};
#define INVALID_SMENDX			-1
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
#define fwspec_smendx(fw, i) \
	(i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
#define for_each_cfg_sme(fw, i, idx) \
	for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)
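
/*
 * Illustrative sketch (not part of the driver itself): given a master's
 * iommu_fwspec, the helpers above are typically used as
 *
 *	int i, idx;
 *
 *	for_each_cfg_sme(fwspec, i, idx)
 *		if (idx != INVALID_SMENDX)
 *			arm_smmu_write_sme(smmu, idx);
 *
 * where i indexes fwspec->ids[] and idx is the SMR/S2CR slot claimed for
 * that ID (or INVALID_SMENDX if none has been allocated yet).
 */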

static bool using_legacy_binding, using_generic_binding;

static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
{
	if (pm_runtime_enabled(smmu->dev))
		return pm_runtime_get_sync(smmu->dev);

	return 0;
}

static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
{
	if (pm_runtime_enabled(smmu->dev))
		pm_runtime_put(smmu->dev);
}

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}

static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", -1)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}

static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;

static int arm_smmu_register_legacy_master(struct device *dev,
					    struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err;

	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}
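
/*
 * Note on the path above: it handles the old "mmu-masters"/"#stream-id-cells"
 * DT binding, where stream IDs are listed on the SMMU node rather than on the
 * masters. It walks every registered SMMU instance looking for a phandle back
 * to this device, then synthesises an iommu_fwspec from the result, so the
 * rest of the driver only ever deals with the generic fwspec representation.
 */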

static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}
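
/*
 * The allocator above is a small lock-free pattern: find_next_zero_bit()
 * proposes a free index and test_and_set_bit() claims it atomically; if
 * another caller races us to the same bit we simply retry. -ENOSPC is only
 * returned once the whole [start, end) range is exhausted.
 */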

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
				int sync, int status)
{
	unsigned int spin_cnt, delay;
	u32 reg;

	if (smmu->impl && unlikely(smmu->impl->tlb_sync))
		return smmu->impl->tlb_sync(smmu, page, sync, status);

	arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL);
	for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
		for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
			reg = arm_smmu_readl(smmu, page, status);
			if (!(reg & sTLBGSTATUS_GSACTIVE))
				return;
			cpu_relax();
		}
		udelay(delay);
	}
	dev_err_ratelimited(smmu->dev,
			    "TLB sync timed out -- SMMU may be deadlocked\n");
}
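
/*
 * Polling strategy above: the sync register is written (with a non-zero dummy
 * value, see QCOM_DUMMY_VAL) and the status register is then busy-polled
 * TLB_SPIN_COUNT times before backing off with an exponentially increasing
 * udelay(), adding up to roughly a second before the timeout warning fires.
 */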

static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
{
	unsigned long flags;

	spin_lock_irqsave(&smmu->global_sync_lock, flags);
	__arm_smmu_tlb_sync(smmu, ARM_SMMU_GR0, ARM_SMMU_GR0_sTLBGSYNC,
			    ARM_SMMU_GR0_sTLBGSTATUS);
	spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
}

static void arm_smmu_tlb_sync_context(struct arm_smmu_domain *smmu_domain)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	unsigned long flags;

	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
	__arm_smmu_tlb_sync(smmu, ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx),
			    ARM_SMMU_CB_TLBSYNC, ARM_SMMU_CB_TLBSTATUS);
	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
}

static void arm_smmu_tlb_inv_context_s1(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	/*
	 * The TLBI write may be relaxed, so ensure that PTEs cleared by the
	 * current CPU are visible beforehand.
	 */
	wmb();
	arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx,
			  ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid);
	arm_smmu_tlb_sync_context(smmu_domain);
}

static void arm_smmu_tlb_inv_context_s2(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	/* See above */
	wmb();
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
	arm_smmu_tlb_sync_global(smmu);
}

static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size,
				      size_t granule, void *cookie, int reg)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	int idx = cfg->cbndx;

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
		iova = (iova >> 12) << 12;
		iova |= cfg->asid;
		do {
			arm_smmu_cb_write(smmu, idx, reg, iova);
			iova += granule;
		} while (size -= granule);
	} else {
		iova >>= 12;
		iova |= (u64)cfg->asid << 48;
		do {
			arm_smmu_cb_writeq(smmu, idx, reg, iova);
			iova += granule >> 12;
		} while (size -= granule);
	}
}
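
/*
 * Illustrative encoding for the function above (values only for illustration):
 * for a stage-1 AArch64 context with asid 5, invalidating iova 0x12345000
 * issues a 64-bit TLBI value of (0x12345000 >> 12) | (5ULL << 48), i.e.
 * 0x0005000000012345, whereas the 32-bit formats write the page-aligned
 * address with the ASID in the low bits instead: 0x12345000 | 5 == 0x12345005.
 */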

static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
				      size_t granule, void *cookie, int reg)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	int idx = smmu_domain->cfg.cbndx;

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	iova >>= 12;
	do {
		if (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64)
			arm_smmu_cb_writeq(smmu, idx, reg, iova);
		else
			arm_smmu_cb_write(smmu, idx, reg, iova);
		iova += granule >> 12;
	} while (size -= granule);
}

static void arm_smmu_tlb_inv_walk_s1(unsigned long iova, size_t size,
				     size_t granule, void *cookie)
{
	arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie,
				  ARM_SMMU_CB_S1_TLBIVA);
	arm_smmu_tlb_sync_context(cookie);
}

static void arm_smmu_tlb_inv_leaf_s1(unsigned long iova, size_t size,
				     size_t granule, void *cookie)
{
	arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie,
				  ARM_SMMU_CB_S1_TLBIVAL);
	arm_smmu_tlb_sync_context(cookie);
}

static void arm_smmu_tlb_add_page_s1(struct iommu_iotlb_gather *gather,
				     unsigned long iova, size_t granule,
				     void *cookie)
{
	arm_smmu_tlb_inv_range_s1(iova, granule, granule, cookie,
				  ARM_SMMU_CB_S1_TLBIVAL);
}

static void arm_smmu_tlb_inv_walk_s2(unsigned long iova, size_t size,
				     size_t granule, void *cookie)
{
	arm_smmu_tlb_inv_range_s2(iova, size, granule, cookie,
				  ARM_SMMU_CB_S2_TLBIIPAS2);
	arm_smmu_tlb_sync_context(cookie);
}

static void arm_smmu_tlb_inv_leaf_s2(unsigned long iova, size_t size,
				     size_t granule, void *cookie)
{
	arm_smmu_tlb_inv_range_s2(iova, size, granule, cookie,
				  ARM_SMMU_CB_S2_TLBIIPAS2L);
	arm_smmu_tlb_sync_context(cookie);
}

static void arm_smmu_tlb_add_page_s2(struct iommu_iotlb_gather *gather,
				     unsigned long iova, size_t granule,
				     void *cookie)
{
	arm_smmu_tlb_inv_range_s2(iova, granule, granule, cookie,
				  ARM_SMMU_CB_S2_TLBIIPAS2L);
}

static void arm_smmu_tlb_inv_any_s2_v1(unsigned long iova, size_t size,
				       size_t granule, void *cookie)
{
	arm_smmu_tlb_inv_context_s2(cookie);
}
/*
 * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
 * almost negligible, but the benefit of getting the first one in as far ahead
 * of the sync as possible is significant, hence we don't just make this a
 * no-op and call arm_smmu_tlb_inv_context_s2() from .iotlb_sync as you might
 * think.
 */
static void arm_smmu_tlb_add_page_s2_v1(struct iommu_iotlb_gather *gather,
					unsigned long iova, size_t granule,
					void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
}

static const struct arm_smmu_flush_ops arm_smmu_s1_tlb_ops = {
	.tlb = {
		.tlb_flush_all	= arm_smmu_tlb_inv_context_s1,
		.tlb_flush_walk	= arm_smmu_tlb_inv_walk_s1,
		.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf_s1,
		.tlb_add_page	= arm_smmu_tlb_add_page_s1,
	},
};

static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
	.tlb = {
		.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
		.tlb_flush_walk	= arm_smmu_tlb_inv_walk_s2,
		.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf_s2,
		.tlb_add_page	= arm_smmu_tlb_add_page_s2,
	},
};

static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
	.tlb = {
		.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
		.tlb_flush_walk	= arm_smmu_tlb_inv_any_s2_v1,
		.tlb_flush_leaf	= arm_smmu_tlb_inv_any_s2_v1,
		.tlb_add_page	= arm_smmu_tlb_add_page_s2_v1,
	},
};
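
/*
 * Note on the three op tables above: stage-1 domains always use the per-CB
 * TLBIVA(L) flavours, while stage-2 domains use TLBIIPAS2(L) on SMMUv2 and
 * fall back to whole-VMID invalidation on SMMUv1, which lacks by-IPA ops.
 * The choice is made per-domain in arm_smmu_init_domain_context().
 */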

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	u32 fsr, fsynr, cbfrsynra;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	int idx = smmu_domain->cfg.cbndx;

	fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0);
	iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);
	cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));

	dev_err_ratelimited(smmu->dev,
	"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
			    fsr, iova, fsynr, cbfrsynra, idx);

	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;

	gfsr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
	gfsynr0 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, gfsr);
	return IRQ_HANDLED;
}

static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	cb->cfg = cfg;

	/* TCR */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr;
		} else {
			cb->tcr[0] = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
			cb->tcr[1] = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			cb->tcr[1] |= FIELD_PREP(TCR2_SEP, TCR2_SEP_UPSTREAM);
			if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
				cb->tcr[1] |= TCR2_AS;
		}
	} else {
		cb->tcr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	}

	/* TTBRs */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
			cb->ttbr[1] = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
		} else {
			cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
			cb->ttbr[0] |= FIELD_PREP(TTBRn_ASID, cfg->asid);
			cb->ttbr[1] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
			cb->ttbr[1] |= FIELD_PREP(TTBRn_ASID, cfg->asid);
		}
	} else {
		cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr;
			cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr;
		} else {
			cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
			cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		}
	}
}
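
/*
 * The function above only computes the shadow register values for a context
 * bank; nothing is written to hardware until arm_smmu_write_context_bank()
 * below pushes them out. Keeping the two separate means the whole bank state
 * can be replayed cheaply, e.g. after the SMMU has been reset or resumed.
 */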

static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
{
	u32 reg;
	bool stage1;
	struct arm_smmu_cb *cb = &smmu->cbs[idx];
	struct arm_smmu_cfg *cfg = cb->cfg;

	/* Unassigned context banks only need disabling */
	if (!cfg) {
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, 0);
		return;
	}

	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	/* CBA2R */
	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_VA64;
		else
			reg = 0;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= FIELD_PREP(CBA2R_VMID16, cfg->vmid);

		arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBA2R(idx), reg);
	}

	/* CBAR */
	reg = FIELD_PREP(CBAR_TYPE, cfg->cbar);
	if (smmu->version < ARM_SMMU_V2)
		reg |= FIELD_PREP(CBAR_IRPTNDX, cfg->irptndx);

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= FIELD_PREP(CBAR_S1_BPSHCFG, CBAR_S1_BPSHCFG_NSH) |
			FIELD_PREP(CBAR_S1_MEMATTR, CBAR_S1_MEMATTR_WB);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= FIELD_PREP(CBAR_VMID, cfg->vmid);
	}
	arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(idx), reg);

	/*
	 * TCR
	 * We must write this before the TTBRs, since it determines the
	 * access behaviour of some fields (in particular, ASID[15:8]).
	 */
	if (stage1 && smmu->version > ARM_SMMU_V1)
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR2, cb->tcr[1]);
	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR, cb->tcr[0]);

	/* TTBRs */
	if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_CONTEXTIDR, cfg->asid);
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR1, cb->ttbr[1]);
	} else {
		arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
		if (stage1)
			arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR1,
					   cb->ttbr[1]);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR0, cb->mair[0]);
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR1, cb->mair[1]);
	}

	/* SCTLR */
	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= SCTLR_E;

	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
}

static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
		smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested	Supported	Actual
	 * S1		N		S1
	 * S1		S1+S2		S1
	 * S1		S2		S2
	 * S1		S1		S1
	 * N		N		N
	 * N		S1+S2		S2
	 * N		S2		S2
	 * N		S1		S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
	    !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
	    (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		} else {
			fmt = ARM_V7S;
			ias = min(ias, 32UL);
			oas = min(oas, 32UL);
		}
		smmu_domain->flush_ops = &arm_smmu_s1_tlb_ops;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		if (smmu->version == ARM_SMMU_V2)
			smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v2;
		else
			smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v1;
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}
	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (ret < 0)
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
		cfg->vmid = cfg->cbndx + 1;
	else
		cfg->asid = cfg->cbndx;

	smmu_domain->smmu = smmu;
	if (smmu->impl && smmu->impl->init_context) {
		ret = smmu->impl->init_context(smmu_domain);
		if (ret)
			goto out_unlock;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.coherent_walk	= smmu->features & ARM_SMMU_FEAT_COHERENT_WALK,
		.tlb		= &smmu_domain->flush_ops->tlb,
		.iommu_dev	= smmu->dev,
	};

	if (smmu_domain->non_strict)
		pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;

	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1UL << ias) - 1;
	domain->geometry.force_aperture = true;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
			       IRQF_SHARED, "arm-smmu-context-fault", domain);
	if (ret < 0) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	int ret, irq;

	if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
		return;

	ret = arm_smmu_rpm_get(smmu);
	if (ret < 0)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	smmu->cbs[cfg->cbndx].cfg = NULL;
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);

	arm_smmu_rpm_put(smmu);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED &&
	    type != IOMMU_DOMAIN_DMA &&
	    type != IOMMU_DOMAIN_IDENTITY)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
	    iommu_get_dma_cookie(&smmu_domain->domain))) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->cb_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_smr *smr = smmu->smrs + idx;
	u32 reg = FIELD_PREP(SMR_ID, smr->id) | FIELD_PREP(SMR_MASK, smr->mask);

	if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
		reg |= SMR_VALID;
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(idx), reg);
}

static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
	u32 reg = FIELD_PREP(S2CR_TYPE, s2cr->type) |
		  FIELD_PREP(S2CR_CBNDX, s2cr->cbndx) |
		  FIELD_PREP(S2CR_PRIVCFG, s2cr->privcfg);

	if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
	    smmu->smrs[idx].valid)
		reg |= S2CR_EXIDVALID;
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
}

static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
{
	arm_smmu_write_s2cr(smmu, idx);
	if (smmu->smrs)
		arm_smmu_write_smr(smmu, idx);
}

/*
 * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
 * should be called after sCR0 is written.
 */
static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
{
	u32 smr;

	if (!smmu->smrs)
		return;

	/*
	 * SMR.ID bits may not be preserved if the corresponding MASK
	 * bits are set, so check each one separately. We can reject
	 * masters later if they try to claim IDs outside these masks.
	 */
	smr = FIELD_PREP(SMR_ID, smmu->streamid_mask);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(0), smr);
	smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(0));
	smmu->streamid_mask = FIELD_GET(SMR_ID, smr);

	smr = FIELD_PREP(SMR_MASK, smmu->streamid_mask);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(0), smr);
	smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(0));
	smmu->smr_mask_mask = FIELD_GET(SMR_MASK, smr);
}
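
/*
 * Illustrative example for the probe above: if only 15 of the 16 SMR.ID bits
 * are writable on a given implementation, writing an all-ones pattern here
 * would read back as 0x7fff rather than 0xffff, so streamid_mask and
 * smr_mask_mask end up reflecting exactly what the hardware retained.
 */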

static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
{
	struct arm_smmu_smr *smrs = smmu->smrs;
	int i, free_idx = -ENOSPC;

	/* Stream indexing is blissfully easy */
	if (!smrs)
		return id;

	/* Validating SMRs is... less so */
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		if (!smrs[i].valid) {
			/*
			 * Note the first free entry we come across, which
			 * we'll claim in the end if nothing else matches.
			 */
			if (free_idx < 0)
				free_idx = i;
			continue;
		}
		/*
		 * If the new entry is _entirely_ matched by an existing entry,
		 * then reuse that, with the guarantee that there also cannot
		 * be any subsequent conflicting entries. In normal use we'd
		 * expect simply identical entries for this case, but there's
		 * no harm in accommodating the generalisation.
		 */
		if ((mask & smrs[i].mask) == mask &&
		    !((id ^ smrs[i].id) & ~smrs[i].mask))
			return i;
		/*
		 * If the new entry has any other overlap with an existing one,
		 * though, then there always exists at least one stream ID
		 * which would cause a conflict, and we can't allow that risk.
		 */
		if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
			return -EINVAL;
	}

	return free_idx;
}
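
/*
 * Worked example for the matching logic above (values are illustrative): an
 * existing entry with id=0x400, mask=0x00f covers stream IDs 0x400-0x40f. A
 * request for id=0x407, mask=0x003 is entirely contained within it, since
 * (0x003 & 0x00f) == 0x003 and (0x407 ^ 0x400) & ~0x00f == 0, so that index
 * is reused. A request for id=0x40f, mask=0x0f0 only partially overlaps it
 * ((0x40f ^ 0x400) & ~(0x00f | 0x0f0) == 0 while the mask is not contained),
 * so it is rejected with -EINVAL.
 */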

static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
{
	if (--smmu->s2crs[idx].count)
		return false;

	smmu->s2crs[idx] = s2cr_init_val;
	if (smmu->smrs)
		smmu->smrs[idx].valid = false;

	return true;
}

static int arm_smmu_master_alloc_smes(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
	struct arm_smmu_device *smmu = cfg->smmu;
	struct arm_smmu_smr *smrs = smmu->smrs;
	struct iommu_group *group;
	int i, idx, ret;

	mutex_lock(&smmu->stream_map_mutex);
	/* Figure out a viable stream map entry allocation */
	for_each_cfg_sme(fwspec, i, idx) {
		u16 sid = FIELD_GET(SMR_ID, fwspec->ids[i]);
		u16 mask = FIELD_GET(SMR_MASK, fwspec->ids[i]);

		if (idx != INVALID_SMENDX) {
			ret = -EEXIST;
			goto out_err;
		}

		ret = arm_smmu_find_sme(smmu, sid, mask);
		if (ret < 0)
			goto out_err;

		idx = ret;
		if (smrs && smmu->s2crs[idx].count == 0) {
			smrs[idx].id = sid;
			smrs[idx].mask = mask;
			smrs[idx].valid = true;
		}
		smmu->s2crs[idx].count++;
		cfg->smendx[i] = (s16)idx;
	}

	group = iommu_group_get_for_dev(dev);
	if (!group)
		group = ERR_PTR(-ENOMEM);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto out_err;
	}
	iommu_group_put(group);

	/* It worked! Now, poke the actual hardware */
	for_each_cfg_sme(fwspec, i, idx) {
		arm_smmu_write_sme(smmu, idx);
		smmu->s2crs[idx].group = group;
	}

	mutex_unlock(&smmu->stream_map_mutex);
	return 0;

out_err:
	while (i--) {
		arm_smmu_free_sme(smmu, cfg->smendx[i]);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
	return ret;
}

static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
{
	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
	int i, idx;

	mutex_lock(&smmu->stream_map_mutex);
	for_each_cfg_sme(fwspec, i, idx) {
		if (arm_smmu_free_sme(smmu, idx))
			arm_smmu_write_sme(smmu, idx);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
}

static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct iommu_fwspec *fwspec)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s2cr *s2cr = smmu->s2crs;
	u8 cbndx = smmu_domain->cfg.cbndx;
	enum arm_smmu_s2cr_type type;
	int i, idx;

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
		type = S2CR_TYPE_BYPASS;
	else
		type = S2CR_TYPE_TRANS;

	for_each_cfg_sme(fwspec, i, idx) {
		if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
			continue;

		s2cr[idx].type = type;
		s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
		s2cr[idx].cbndx = cbndx;
		arm_smmu_write_s2cr(smmu, idx);
	}
	return 0;
}
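
/*
 * In other words, attaching a master either points its S2CR entries at the
 * domain's context bank (S2CR_TYPE_TRANS) or lets its traffic through
 * untranslated (S2CR_TYPE_BYPASS for identity domains); unmatched streams
 * keep the reset value chosen by s2cr_init_val, i.e. fault or bypass
 * depending on the disable_bypass parameter.
 */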

static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct arm_smmu_device *smmu;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (!fwspec || fwspec->ops != &arm_smmu_ops) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/*
	 * FIXME: The arch/arm DMA API code tries to attach devices to its own
	 * domains between of_xlate() and add_device() - we have no way to cope
	 * with that, so until ARM gets converted to rely on groups and default
	 * domains, just say no (but more politely than by dereferencing NULL).
	 * This should be at least a WARN_ON once that's sorted.
	 */
	if (!fwspec->iommu_priv)
		return -ENODEV;

	smmu = fwspec_smmu(fwspec);

	ret = arm_smmu_rpm_get(smmu);
	if (ret < 0)
		return ret;

	/* Ensure that the domain is finalised */
	ret = arm_smmu_init_domain_context(domain, smmu);
	if (ret < 0)
		goto rpm_put;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different SMMUs.
	 */
	if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
		ret = -EINVAL;
		goto rpm_put;
	}

	/* Looks ok, so add the device to the domain */
	ret = arm_smmu_domain_add_master(smmu_domain, fwspec);

rpm_put:
	arm_smmu_rpm_put(smmu);
	return ret;
}

static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
	struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
	int ret;

	if (!ops)
		return -ENODEV;

	arm_smmu_rpm_get(smmu);
	ret = ops->map(ops, iova, paddr, size, prot);
	arm_smmu_rpm_put(smmu);

	return ret;
}

static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size, struct iommu_iotlb_gather *gather)
{
	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
	struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
	size_t ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001195
Will Deacon518f7132014-11-14 17:17:54 +00001196 if (!ops)
1197 return 0;
1198
Sricharan Rd4a44f02018-12-04 11:52:10 +05301199 arm_smmu_rpm_get(smmu);
Will Deacona2d3a382019-07-02 16:44:58 +01001200 ret = ops->unmap(ops, iova, size, gather);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301201 arm_smmu_rpm_put(smmu);
1202
1203 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001204}
1205
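/*
 * TLB maintenance callbacks: flush_iotlb_all invalidates everything for the
 * domain, while iotlb_sync only waits for outstanding invalidations to
 * complete (per-context on SMMUv2/stage 1, global otherwise).
 */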
Robin Murphy44f68762018-09-20 17:10:27 +01001206static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
1207{
1208 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301209 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy44f68762018-09-20 17:10:27 +01001210
Will Deaconabfd6fe2019-07-02 16:44:41 +01001211 if (smmu_domain->flush_ops) {
Sricharan Rd4a44f02018-12-04 11:52:10 +05301212 arm_smmu_rpm_get(smmu);
Will Deaconabfd6fe2019-07-02 16:44:41 +01001213 smmu_domain->flush_ops->tlb.tlb_flush_all(smmu_domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301214 arm_smmu_rpm_put(smmu);
1215 }
Robin Murphy44f68762018-09-20 17:10:27 +01001216}
1217
Will Deacon56f8af52019-07-02 16:44:06 +01001218static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
1219 struct iommu_iotlb_gather *gather)
Robin Murphy32b12442017-09-28 15:55:01 +01001220{
1221 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301222 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy32b12442017-09-28 15:55:01 +01001223
Robin Murphyae2b60f2019-09-18 17:17:50 +01001224 if (!smmu)
1225 return;
1226
1227 arm_smmu_rpm_get(smmu);
1228 if (smmu->version == ARM_SMMU_V2 ||
1229 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
1230 arm_smmu_tlb_sync_context(smmu_domain);
1231 else
1232 arm_smmu_tlb_sync_global(smmu);
1233 arm_smmu_rpm_put(smmu);
Robin Murphy32b12442017-09-28 15:55:01 +01001234}
1235
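/*
 * Resolve an IOVA using the hardware ATS1PR address translation operation,
 * polling ATSR for completion. On timeout we fall back to a software walk of
 * the page tables; a fault reported in PAR returns 0.
 */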
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001236static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
1237 dma_addr_t iova)
1238{
Joerg Roedel1d672632015-03-26 13:43:10 +01001239 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001240 struct arm_smmu_device *smmu = smmu_domain->smmu;
1241 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1242	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1243 struct device *dev = smmu->dev;
Robin Murphy19713fd2019-08-15 19:37:30 +01001244 void __iomem *reg;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001245 u32 tmp;
1246 u64 phys;
Robin Murphy523d7422017-06-22 16:53:56 +01001247 unsigned long va, flags;
Robin Murphy19713fd2019-08-15 19:37:30 +01001248 int ret, idx = cfg->cbndx;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301249
1250 ret = arm_smmu_rpm_get(smmu);
1251 if (ret < 0)
1252 return 0;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001253
Robin Murphy523d7422017-06-22 16:53:56 +01001254 spin_lock_irqsave(&smmu_domain->cb_lock, flags);
Robin Murphy661d9622015-05-27 17:09:34 +01001255 va = iova & ~0xfffUL;
Robin Murphy61005762019-08-15 19:37:28 +01001256 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
Robin Murphy19713fd2019-08-15 19:37:30 +01001257 arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
Robin Murphy61005762019-08-15 19:37:28 +01001258 else
Robin Murphy19713fd2019-08-15 19:37:30 +01001259 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001260
Robin Murphy19713fd2019-08-15 19:37:30 +01001261 reg = arm_smmu_page(smmu, ARM_SMMU_CB(smmu, idx)) + ARM_SMMU_CB_ATSR;
1262 if (readl_poll_timeout_atomic(reg, tmp, !(tmp & ATSR_ACTIVE), 5, 50)) {
Robin Murphy523d7422017-06-22 16:53:56 +01001263 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001264 dev_err(dev,
Fabio Estevam077124c2015-08-18 17:12:24 +01001265 "iova to phys timed out on %pad. Falling back to software table walk.\n",
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001266 &iova);
1267 return ops->iova_to_phys(ops, iova);
1268 }
1269
Robin Murphy19713fd2019-08-15 19:37:30 +01001270 phys = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_PAR);
Robin Murphy523d7422017-06-22 16:53:56 +01001271 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001272 if (phys & CB_PAR_F) {
1273 dev_err(dev, "translation fault!\n");
1274 dev_err(dev, "PAR = 0x%llx\n", phys);
1275 return 0;
1276 }
1277
Sricharan Rd4a44f02018-12-04 11:52:10 +05301278 arm_smmu_rpm_put(smmu);
1279
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001280 return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
1281}
1282
Will Deacon45ae7cf2013-06-24 18:31:25 +01001283static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001284 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001285{
Joerg Roedel1d672632015-03-26 13:43:10 +01001286 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Robin Murphy523d7422017-06-22 16:53:56 +01001287 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001288
Sunil Gouthambdf95922017-04-25 15:27:52 +05301289 if (domain->type == IOMMU_DOMAIN_IDENTITY)
1290 return iova;
1291
Will Deacon518f7132014-11-14 17:17:54 +00001292 if (!ops)
Will Deacona44a97912013-11-07 18:47:50 +00001293 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001294
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001295 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
Robin Murphy523d7422017-06-22 16:53:56 +01001296 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
1297 return arm_smmu_iova_to_phys_hard(domain, iova);
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001298
Robin Murphy523d7422017-06-22 16:53:56 +01001299 return ops->iova_to_phys(ops, iova);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001300}
1301
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001302static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001303{
Will Deacond0948942014-06-24 17:30:10 +01001304 switch (cap) {
1305 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001306 /*
1307 * Return true here as the SMMU can always send out coherent
1308 * requests.
1309 */
1310 return true;
Antonios Motakis0029a8d2014-10-13 14:06:18 +01001311 case IOMMU_CAP_NOEXEC:
1312 return true;
Will Deacond0948942014-06-24 17:30:10 +01001313 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001314 return false;
Will Deacond0948942014-06-24 17:30:10 +01001315 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001316}
Will Deacon45ae7cf2013-06-24 18:31:25 +01001317
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001318static
1319struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
Robin Murphy021bb842016-09-14 15:26:46 +01001320{
Suzuki K Poulose67843bb2019-07-23 23:18:34 +01001321 struct device *dev = driver_find_device_by_fwnode(&arm_smmu_driver.driver,
1322 fwnode);
Robin Murphy021bb842016-09-14 15:26:46 +01001323 put_device(dev);
1324 return dev ? dev_get_drvdata(dev) : NULL;
1325}
1326
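/*
 * add_device: resolve the owning SMMU (legacy "mmu-masters" or generic
 * fwspec binding), sanity-check every stream ID and mask against what the
 * hardware supports, then allocate the per-master cfg and its SMEs.
 */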
Will Deacon03edb222015-01-19 14:27:33 +00001327static int arm_smmu_add_device(struct device *dev)
1328{
Robin Murphyadfec2e2016-09-12 17:13:55 +01001329 struct arm_smmu_device *smmu;
Robin Murphyf80cd882016-09-14 15:21:39 +01001330 struct arm_smmu_master_cfg *cfg;
Joerg Roedel9b468f72018-11-29 14:01:00 +01001331 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphyf80cd882016-09-14 15:21:39 +01001332 int i, ret;
1333
Robin Murphy021bb842016-09-14 15:26:46 +01001334 if (using_legacy_binding) {
1335 ret = arm_smmu_register_legacy_master(dev, &smmu);
Artem Savkova7990c62017-08-08 12:26:02 +02001336
1337 /*
1338		 * If dev->iommu_fwspec is initially NULL, arm_smmu_register_legacy_master()
1339 * will allocate/initialise a new one. Thus we need to update fwspec for
1340 * later use.
1341 */
Joerg Roedel9b468f72018-11-29 14:01:00 +01001342 fwspec = dev_iommu_fwspec_get(dev);
Robin Murphy021bb842016-09-14 15:26:46 +01001343 if (ret)
1344 goto out_free;
Robin Murphy3c117b52016-11-02 17:31:32 +00001345 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001346 smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
Robin Murphy021bb842016-09-14 15:26:46 +01001347 } else {
1348 return -ENODEV;
1349 }
Robin Murphyf80cd882016-09-14 15:21:39 +01001350
1351 ret = -EINVAL;
Robin Murphyadfec2e2016-09-12 17:13:55 +01001352 for (i = 0; i < fwspec->num_ids; i++) {
Robin Murphy0caf5f42019-08-15 19:37:23 +01001353 u16 sid = FIELD_GET(SMR_ID, fwspec->ids[i]);
1354 u16 mask = FIELD_GET(SMR_MASK, fwspec->ids[i]);
Robin Murphyf80cd882016-09-14 15:21:39 +01001355
Robin Murphyadfec2e2016-09-12 17:13:55 +01001356 if (sid & ~smmu->streamid_mask) {
Robin Murphyf80cd882016-09-14 15:21:39 +01001357 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
Robin Murphy021bb842016-09-14 15:26:46 +01001358 sid, smmu->streamid_mask);
1359 goto out_free;
1360 }
1361 if (mask & ~smmu->smr_mask_mask) {
1362 dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
Peng Fan6323f472017-04-21 17:03:36 +08001363 mask, smmu->smr_mask_mask);
Robin Murphyf80cd882016-09-14 15:21:39 +01001364 goto out_free;
1365 }
Robin Murphyf80cd882016-09-14 15:21:39 +01001366 }
Will Deacon03edb222015-01-19 14:27:33 +00001367
Robin Murphyadfec2e2016-09-12 17:13:55 +01001368 ret = -ENOMEM;
1369 cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
1370 GFP_KERNEL);
1371 if (!cfg)
1372 goto out_free;
1373
1374 cfg->smmu = smmu;
1375 fwspec->iommu_priv = cfg;
1376 while (i--)
1377 cfg->smendx[i] = INVALID_SMENDX;
1378
Sricharan Rd4a44f02018-12-04 11:52:10 +05301379 ret = arm_smmu_rpm_get(smmu);
1380 if (ret < 0)
1381 goto out_cfg_free;
1382
Robin Murphy588888a2016-09-12 17:13:54 +01001383 ret = arm_smmu_master_alloc_smes(dev);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301384 arm_smmu_rpm_put(smmu);
1385
Robin Murphyadfec2e2016-09-12 17:13:55 +01001386 if (ret)
Vivek Gautamc54451a2017-07-06 15:07:00 +05301387 goto out_cfg_free;
Robin Murphyadfec2e2016-09-12 17:13:55 +01001388
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001389 iommu_device_link(&smmu->iommu, dev);
1390
Sricharan R655e3642018-12-04 11:52:11 +05301391 device_link_add(dev, smmu->dev,
1392 DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER);
1393
Robin Murphyadfec2e2016-09-12 17:13:55 +01001394 return 0;
Robin Murphyf80cd882016-09-14 15:21:39 +01001395
Vivek Gautamc54451a2017-07-06 15:07:00 +05301396out_cfg_free:
1397 kfree(cfg);
Robin Murphyf80cd882016-09-14 15:21:39 +01001398out_free:
Robin Murphyadfec2e2016-09-12 17:13:55 +01001399 iommu_fwspec_free(dev);
Robin Murphyf80cd882016-09-14 15:21:39 +01001400 return ret;
Will Deacon03edb222015-01-19 14:27:33 +00001401}
1402
Will Deacon45ae7cf2013-06-24 18:31:25 +01001403static void arm_smmu_remove_device(struct device *dev)
1404{
Joerg Roedel9b468f72018-11-29 14:01:00 +01001405 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001406 struct arm_smmu_master_cfg *cfg;
1407 struct arm_smmu_device *smmu;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301408 int ret;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001409
Robin Murphyadfec2e2016-09-12 17:13:55 +01001410 if (!fwspec || fwspec->ops != &arm_smmu_ops)
Robin Murphyf80cd882016-09-14 15:21:39 +01001411 return;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001412
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001413 cfg = fwspec->iommu_priv;
1414 smmu = cfg->smmu;
1415
Sricharan Rd4a44f02018-12-04 11:52:10 +05301416 ret = arm_smmu_rpm_get(smmu);
1417 if (ret < 0)
1418 return;
1419
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001420 iommu_device_unlink(&smmu->iommu, dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001421 arm_smmu_master_free_smes(fwspec);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301422
1423 arm_smmu_rpm_put(smmu);
1424
Antonios Motakis5fc63a72013-10-18 16:08:29 +01001425 iommu_group_remove_device(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001426 kfree(fwspec->iommu_priv);
1427 iommu_fwspec_free(dev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001428}
1429
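/*
 * Masters sharing a stream mapping entry must share an IOMMU group; if an
 * existing group is found it is reused, otherwise fall back to the default
 * PCI/fsl-mc/platform grouping.
 */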
Joerg Roedelaf659932015-10-21 23:51:41 +02001430static struct iommu_group *arm_smmu_device_group(struct device *dev)
1431{
Joerg Roedel9b468f72018-11-29 14:01:00 +01001432 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001433 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
Robin Murphy588888a2016-09-12 17:13:54 +01001434 struct iommu_group *group = NULL;
1435 int i, idx;
1436
Robin Murphyadfec2e2016-09-12 17:13:55 +01001437 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy588888a2016-09-12 17:13:54 +01001438 if (group && smmu->s2crs[idx].group &&
1439 group != smmu->s2crs[idx].group)
1440 return ERR_PTR(-EINVAL);
1441
1442 group = smmu->s2crs[idx].group;
1443 }
1444
1445 if (group)
Robin Murphye1b44cb2016-11-11 17:59:22 +00001446 return iommu_group_ref_get(group);
Joerg Roedelaf659932015-10-21 23:51:41 +02001447
1448 if (dev_is_pci(dev))
1449 group = pci_device_group(dev);
Nipun Guptaeab03e22018-09-10 19:19:18 +05301450 else if (dev_is_fsl_mc(dev))
1451 group = fsl_mc_device_group(dev);
Joerg Roedelaf659932015-10-21 23:51:41 +02001452 else
1453 group = generic_device_group(dev);
1454
Joerg Roedelaf659932015-10-21 23:51:41 +02001455 return group;
1456}
1457
Will Deaconc752ce42014-06-25 22:46:31 +01001458static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
1459 enum iommu_attr attr, void *data)
1460{
Joerg Roedel1d672632015-03-26 13:43:10 +01001461 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001462
Robin Murphy44f68762018-09-20 17:10:27 +01001463	switch (domain->type) {
1464 case IOMMU_DOMAIN_UNMANAGED:
1465 switch (attr) {
1466 case DOMAIN_ATTR_NESTING:
1467 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
1468 return 0;
1469 default:
1470 return -ENODEV;
1471 }
1472 break;
1473 case IOMMU_DOMAIN_DMA:
1474 switch (attr) {
1475 case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
1476 *(int *)data = smmu_domain->non_strict;
1477 return 0;
1478 default:
1479 return -ENODEV;
1480 }
1481 break;
Will Deaconc752ce42014-06-25 22:46:31 +01001482 default:
Robin Murphy44f68762018-09-20 17:10:27 +01001483 return -EINVAL;
Will Deaconc752ce42014-06-25 22:46:31 +01001484 }
1485}
1486
1487static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
1488 enum iommu_attr attr, void *data)
1489{
Will Deacon518f7132014-11-14 17:17:54 +00001490 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01001491 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001492
Will Deacon518f7132014-11-14 17:17:54 +00001493 mutex_lock(&smmu_domain->init_mutex);
1494
Robin Murphy44f68762018-09-20 17:10:27 +01001495	switch (domain->type) {
1496 case IOMMU_DOMAIN_UNMANAGED:
1497 switch (attr) {
1498 case DOMAIN_ATTR_NESTING:
1499 if (smmu_domain->smmu) {
1500 ret = -EPERM;
1501 goto out_unlock;
1502 }
1503
1504 if (*(int *)data)
1505 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1506 else
1507 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1508 break;
1509 default:
1510 ret = -ENODEV;
Will Deacon518f7132014-11-14 17:17:54 +00001511 }
Robin Murphy44f68762018-09-20 17:10:27 +01001512 break;
1513 case IOMMU_DOMAIN_DMA:
1514 switch (attr) {
1515 case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
1516 smmu_domain->non_strict = *(int *)data;
1517 break;
1518 default:
1519 ret = -ENODEV;
1520 }
Will Deacon518f7132014-11-14 17:17:54 +00001521 break;
Will Deaconc752ce42014-06-25 22:46:31 +01001522 default:
Robin Murphy44f68762018-09-20 17:10:27 +01001523 ret = -EINVAL;
Will Deaconc752ce42014-06-25 22:46:31 +01001524 }
Will Deacon518f7132014-11-14 17:17:54 +00001525out_unlock:
1526 mutex_unlock(&smmu_domain->init_mutex);
1527 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01001528}
1529
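/*
 * Pack the "iommus" specifier cells (stream ID, optional mask, or the
 * "stream-match-mask" property) into a single fwspec ID in SMR layout.
 */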
Robin Murphy021bb842016-09-14 15:26:46 +01001530static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
1531{
Robin Murphy56fbf602017-03-31 12:03:33 +01001532 u32 mask, fwid = 0;
Robin Murphy021bb842016-09-14 15:26:46 +01001533
1534 if (args->args_count > 0)
Robin Murphy0caf5f42019-08-15 19:37:23 +01001535 fwid |= FIELD_PREP(SMR_ID, args->args[0]);
Robin Murphy021bb842016-09-14 15:26:46 +01001536
1537 if (args->args_count > 1)
Robin Murphy0caf5f42019-08-15 19:37:23 +01001538 fwid |= FIELD_PREP(SMR_MASK, args->args[1]);
Robin Murphy56fbf602017-03-31 12:03:33 +01001539 else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
Robin Murphy0caf5f42019-08-15 19:37:23 +01001540 fwid |= FIELD_PREP(SMR_MASK, mask);
Robin Murphy021bb842016-09-14 15:26:46 +01001541
1542 return iommu_fwspec_add_ids(dev, &fwid, 1);
1543}
1544
Eric Augerf3ebee82017-01-19 20:57:55 +00001545static void arm_smmu_get_resv_regions(struct device *dev,
1546 struct list_head *head)
1547{
1548 struct iommu_resv_region *region;
1549 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
1550
1551 region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
Robin Murphy9d3a4de2017-03-16 17:00:16 +00001552 prot, IOMMU_RESV_SW_MSI);
Eric Augerf3ebee82017-01-19 20:57:55 +00001553 if (!region)
1554 return;
1555
1556 list_add_tail(&region->list, head);
Robin Murphy273df962017-03-16 17:00:19 +00001557
1558 iommu_dma_get_resv_regions(dev, head);
Eric Augerf3ebee82017-01-19 20:57:55 +00001559}
1560
1561static void arm_smmu_put_resv_regions(struct device *dev,
1562 struct list_head *head)
1563{
1564 struct iommu_resv_region *entry, *next;
1565
1566 list_for_each_entry_safe(entry, next, head, list)
1567 kfree(entry);
1568}
1569
Will Deacon518f7132014-11-14 17:17:54 +00001570static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01001571 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01001572 .domain_alloc = arm_smmu_domain_alloc,
1573 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01001574 .attach_dev = arm_smmu_attach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01001575 .map = arm_smmu_map,
1576 .unmap = arm_smmu_unmap,
Robin Murphy44f68762018-09-20 17:10:27 +01001577 .flush_iotlb_all = arm_smmu_flush_iotlb_all,
Robin Murphy32b12442017-09-28 15:55:01 +01001578 .iotlb_sync = arm_smmu_iotlb_sync,
Will Deaconc752ce42014-06-25 22:46:31 +01001579 .iova_to_phys = arm_smmu_iova_to_phys,
1580 .add_device = arm_smmu_add_device,
1581 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02001582 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01001583 .domain_get_attr = arm_smmu_domain_get_attr,
1584 .domain_set_attr = arm_smmu_domain_set_attr,
Robin Murphy021bb842016-09-14 15:26:46 +01001585 .of_xlate = arm_smmu_of_xlate,
Eric Augerf3ebee82017-01-19 20:57:55 +00001586 .get_resv_regions = arm_smmu_get_resv_regions,
1587 .put_resv_regions = arm_smmu_put_resv_regions,
Will Deacon518f7132014-11-14 17:17:54 +00001588 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001589};
1590
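/*
 * Bring the SMMU to a known state: clear the global fault status, reset all
 * stream mapping and context bank registers, invalidate the TLBs and then
 * configure sCR0 (fault reporting, bypass/fault policy for unmatched
 * streams, VMID16/EXIDS where supported).
 */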
1591static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
1592{
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001593 int i;
Robin Murphy62b993a2019-08-15 19:37:36 +01001594 u32 reg;
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001595
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001596 /* clear global FSR */
Robin Murphy00320ce2019-08-15 19:37:31 +01001597 reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
1598 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, reg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001599
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001600 /*
1601 * Reset stream mapping groups: Initial values mark all SMRn as
1602 * invalid and all S2CRn as bypass unless overridden.
1603 */
Robin Murphy8e8b2032016-09-12 17:13:50 +01001604 for (i = 0; i < smmu->num_mapping_groups; ++i)
1605 arm_smmu_write_sme(smmu, i);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001606
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001607 /* Make sure all context banks are disabled and clear CB_FSR */
1608 for (i = 0; i < smmu->num_context_banks; ++i) {
Robin Murphy90df3732017-08-08 14:56:14 +01001609 arm_smmu_write_context_bank(smmu, i);
Robin Murphy19713fd2019-08-15 19:37:30 +01001610 arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_FSR, FSR_FAULT);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001611 }
Will Deacon1463fe42013-07-31 19:21:27 +01001612
Will Deacon45ae7cf2013-06-24 18:31:25 +01001613 /* Invalidate the TLB, just in case */
Robin Murphy00320ce2019-08-15 19:37:31 +01001614 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLH, QCOM_DUMMY_VAL);
1615 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLNSNH, QCOM_DUMMY_VAL);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001616
Robin Murphy00320ce2019-08-15 19:37:31 +01001617 reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001618
Will Deacon45ae7cf2013-06-24 18:31:25 +01001619 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001620 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001621
1622 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001623 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001624
Robin Murphy25a1c962016-02-10 14:25:33 +00001625 /* Enable client access, handling unmatched streams as appropriate */
1626 reg &= ~sCR0_CLIENTPD;
1627 if (disable_bypass)
1628 reg |= sCR0_USFCFG;
1629 else
1630 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001631
1632 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001633 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001634
1635 /* Don't upgrade barriers */
Robin Murphy0caf5f42019-08-15 19:37:23 +01001636 reg &= ~(sCR0_BSU);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001637
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001638 if (smmu->features & ARM_SMMU_FEAT_VMID16)
1639 reg |= sCR0_VMID16EN;
1640
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001641 if (smmu->features & ARM_SMMU_FEAT_EXIDS)
1642 reg |= sCR0_EXIDENABLE;
1643
Robin Murphy62b993a2019-08-15 19:37:36 +01001644 if (smmu->impl && smmu->impl->reset)
1645 smmu->impl->reset(smmu);
1646
Will Deacon45ae7cf2013-06-24 18:31:25 +01001647 /* Push the button */
Robin Murphy11febfc2017-03-30 17:56:31 +01001648 arm_smmu_tlb_sync_global(smmu);
Robin Murphy00320ce2019-08-15 19:37:31 +01001649 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, reg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001650}
1651
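/* Decode the 3-bit IAS/OAS/UBS ID register fields into address bits */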
1652static int arm_smmu_id_size_to_bits(int size)
1653{
1654 switch (size) {
1655 case 0:
1656 return 32;
1657 case 1:
1658 return 36;
1659 case 2:
1660 return 40;
1661 case 3:
1662 return 42;
1663 case 4:
1664 return 44;
1665 case 5:
1666 default:
1667 return 48;
1668 }
1669}
1670
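/*
 * Probe the hardware configuration from ID0-ID2: supported translation
 * stages, stream matching resources, context bank counts, address sizes and
 * page table formats, reporting each discovery as we go.
 */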
1671static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1672{
Robin Murphy490325e2019-08-15 19:37:26 +01001673 unsigned int size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001674 u32 id;
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001675 bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001676 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001677
1678 dev_notice(smmu->dev, "probing hardware configuration...\n");
Robin Murphyb7862e32016-04-13 18:13:03 +01001679 dev_notice(smmu->dev, "SMMUv%d with:\n",
1680 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001681
1682 /* ID0 */
Robin Murphy00320ce2019-08-15 19:37:31 +01001683 id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01001684
1685 /* Restrict available stages based on module parameter */
1686 if (force_stage == 1)
1687 id &= ~(ID0_S2TS | ID0_NTS);
1688 else if (force_stage == 2)
1689 id &= ~(ID0_S1TS | ID0_NTS);
1690
Will Deacon45ae7cf2013-06-24 18:31:25 +01001691 if (id & ID0_S1TS) {
1692 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
1693 dev_notice(smmu->dev, "\tstage 1 translation\n");
1694 }
1695
1696 if (id & ID0_S2TS) {
1697 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
1698 dev_notice(smmu->dev, "\tstage 2 translation\n");
1699 }
1700
1701 if (id & ID0_NTS) {
1702 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
1703 dev_notice(smmu->dev, "\tnested translation\n");
1704 }
1705
1706 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01001707 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001708 dev_err(smmu->dev, "\tno translation support!\n");
1709 return -ENODEV;
1710 }
1711
Robin Murphyb7862e32016-04-13 18:13:03 +01001712 if ((id & ID0_S1TS) &&
1713 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001714 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
1715 dev_notice(smmu->dev, "\taddress translation ops\n");
1716 }
1717
Robin Murphybae2c2d2015-07-29 19:46:05 +01001718 /*
1719 * In order for DMA API calls to work properly, we must defer to what
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001720 * the FW says about coherency, regardless of what the hardware claims.
Robin Murphybae2c2d2015-07-29 19:46:05 +01001721 * Fortunately, this also opens up a workaround for systems where the
1722 * ID register value has ended up configured incorrectly.
1723 */
Robin Murphybae2c2d2015-07-29 19:46:05 +01001724 cttw_reg = !!(id & ID0_CTTW);
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001725 if (cttw_fw || cttw_reg)
Robin Murphybae2c2d2015-07-29 19:46:05 +01001726 dev_notice(smmu->dev, "\t%scoherent table walk\n",
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001727 cttw_fw ? "" : "non-");
1728 if (cttw_fw != cttw_reg)
Robin Murphybae2c2d2015-07-29 19:46:05 +01001729 dev_notice(smmu->dev,
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001730 "\t(IDR0.CTTW overridden by FW configuration)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01001731
Robin Murphy21174242016-09-12 17:13:48 +01001732 /* Max. number of entries we have for stream matching/indexing */
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001733 if (smmu->version == ARM_SMMU_V2 && id & ID0_EXIDS) {
1734 smmu->features |= ARM_SMMU_FEAT_EXIDS;
1735 size = 1 << 16;
1736 } else {
Robin Murphy0caf5f42019-08-15 19:37:23 +01001737 size = 1 << FIELD_GET(ID0_NUMSIDB, id);
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001738 }
Robin Murphy21174242016-09-12 17:13:48 +01001739 smmu->streamid_mask = size - 1;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001740 if (id & ID0_SMS) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001741 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
Robin Murphy0caf5f42019-08-15 19:37:23 +01001742 size = FIELD_GET(ID0_NUMSMRG, id);
Robin Murphy21174242016-09-12 17:13:48 +01001743 if (size == 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001744 dev_err(smmu->dev,
1745 "stream-matching supported, but no SMRs present!\n");
1746 return -ENODEV;
1747 }
1748
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001749 /* Zero-initialised to mark as invalid */
1750 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
1751 GFP_KERNEL);
1752 if (!smmu->smrs)
1753 return -ENOMEM;
1754
Will Deacon45ae7cf2013-06-24 18:31:25 +01001755 dev_notice(smmu->dev,
Robin Murphy490325e2019-08-15 19:37:26 +01001756		   "\tstream matching with %u register groups\n", size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001757 }
Robin Murphy8e8b2032016-09-12 17:13:50 +01001758 /* s2cr->type == 0 means translation, so initialise explicitly */
1759 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
1760 GFP_KERNEL);
1761 if (!smmu->s2crs)
1762 return -ENOMEM;
1763 for (i = 0; i < size; i++)
1764 smmu->s2crs[i] = s2cr_init_val;
1765
Robin Murphy21174242016-09-12 17:13:48 +01001766 smmu->num_mapping_groups = size;
Robin Murphy588888a2016-09-12 17:13:54 +01001767 mutex_init(&smmu->stream_map_mutex);
Will Deacon8e517e72017-07-06 15:55:48 +01001768 spin_lock_init(&smmu->global_sync_lock);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001769
Robin Murphy7602b872016-04-28 17:12:09 +01001770 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
1771 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
1772 if (!(id & ID0_PTFS_NO_AARCH32S))
1773 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
1774 }
1775
Will Deacon45ae7cf2013-06-24 18:31:25 +01001776 /* ID1 */
Robin Murphy00320ce2019-08-15 19:37:31 +01001777 id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01001778 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001779
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01001780 /* Check for size mismatch of SMMU address space from mapped region */
Robin Murphy0caf5f42019-08-15 19:37:23 +01001781 size = 1 << (FIELD_GET(ID1_NUMPAGENDXB, id) + 1);
Robin Murphy490325e2019-08-15 19:37:26 +01001782 if (smmu->numpage != 2 * size << smmu->pgshift)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001783 dev_warn(smmu->dev,
Robin Murphy490325e2019-08-15 19:37:26 +01001784 "SMMU address space size (0x%x) differs from mapped region size (0x%x)!\n",
1785 2 * size << smmu->pgshift, smmu->numpage);
1786 /* Now properly encode NUMPAGE to subsequently derive SMMU_CB_BASE */
1787 smmu->numpage = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001788
Robin Murphy0caf5f42019-08-15 19:37:23 +01001789 smmu->num_s2_context_banks = FIELD_GET(ID1_NUMS2CB, id);
1790 smmu->num_context_banks = FIELD_GET(ID1_NUMCB, id);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001791 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
1792 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
1793 return -ENODEV;
1794 }
1795 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
1796 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphy90df3732017-08-08 14:56:14 +01001797 smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks,
1798 sizeof(*smmu->cbs), GFP_KERNEL);
1799 if (!smmu->cbs)
1800 return -ENOMEM;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001801
1802 /* ID2 */
Robin Murphy00320ce2019-08-15 19:37:31 +01001803 id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID2);
Robin Murphy0caf5f42019-08-15 19:37:23 +01001804 size = arm_smmu_id_size_to_bits(FIELD_GET(ID2_IAS, id));
Will Deacon518f7132014-11-14 17:17:54 +00001805 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001806
Will Deacon518f7132014-11-14 17:17:54 +00001807 /* The output mask is also applied for bypass */
Robin Murphy0caf5f42019-08-15 19:37:23 +01001808 size = arm_smmu_id_size_to_bits(FIELD_GET(ID2_OAS, id));
Will Deacon518f7132014-11-14 17:17:54 +00001809 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001810
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001811 if (id & ID2_VMID16)
1812 smmu->features |= ARM_SMMU_FEAT_VMID16;
1813
Robin Murphyf1d84542015-03-04 16:41:05 +00001814 /*
1815 * What the page table walker can address actually depends on which
1816 * descriptor format is in use, but since a) we don't know that yet,
1817 * and b) it can vary per context bank, this will have to do...
1818 */
1819 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
1820 dev_warn(smmu->dev,
1821 "failed to set DMA mask for table walker\n");
1822
Robin Murphyb7862e32016-04-13 18:13:03 +01001823 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00001824 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01001825 if (smmu->version == ARM_SMMU_V1_64K)
1826 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001827 } else {
Robin Murphy0caf5f42019-08-15 19:37:23 +01001828 size = FIELD_GET(ID2_UBS, id);
Will Deacon518f7132014-11-14 17:17:54 +00001829 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00001830 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01001831 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00001832 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01001833 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00001834 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01001835 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001836 }
1837
Robin Murphy7602b872016-04-28 17:12:09 +01001838 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01001839 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01001840 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01001841 if (smmu->features &
1842 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01001843 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01001844 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01001845 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01001846 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01001847 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01001848
Robin Murphyd5466352016-05-09 17:20:09 +01001849 if (arm_smmu_ops.pgsize_bitmap == -1UL)
1850 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
1851 else
1852 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
1853 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
1854 smmu->pgsize_bitmap);
1855
Will Deacon518f7132014-11-14 17:17:54 +00001856
Will Deacon28d60072014-09-01 16:24:48 +01001857 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
1858 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
Will Deacon518f7132014-11-14 17:17:54 +00001859 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01001860
1861 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
1862 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
Will Deacon518f7132014-11-14 17:17:54 +00001863 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01001864
Robin Murphy3995e182019-08-15 19:37:35 +01001865 if (smmu->impl && smmu->impl->cfg_probe)
1866 return smmu->impl->cfg_probe(smmu);
1867
Will Deacon45ae7cf2013-06-24 18:31:25 +01001868 return 0;
1869}
1870
Robin Murphy67b65a32016-04-13 18:12:57 +01001871struct arm_smmu_match_data {
1872 enum arm_smmu_arch_version version;
1873 enum arm_smmu_implementation model;
1874};
1875
1876#define ARM_SMMU_MATCH_DATA(name, ver, imp) \
Sricharan R96a299d2018-12-04 11:52:09 +05301877static const struct arm_smmu_match_data name = { .version = ver, .model = imp }
Robin Murphy67b65a32016-04-13 18:12:57 +01001878
1879ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
1880ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
Robin Murphyb7862e32016-04-13 18:13:03 +01001881ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01001882ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
Robin Murphye086d912016-04-13 18:12:58 +01001883ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
Vivek Gautam89cddc52018-12-04 11:52:13 +05301884ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2);
Robin Murphy67b65a32016-04-13 18:12:57 +01001885
Joerg Roedel09b52692014-10-02 12:24:45 +02001886static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01001887 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
1888 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
1889 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01001890 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01001891 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01001892 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Vivek Gautam89cddc52018-12-04 11:52:13 +05301893 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
Robin Murphy09360402014-08-28 17:51:59 +01001894 { },
1895};
Robin Murphy09360402014-08-28 17:51:59 +01001896
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00001897#ifdef CONFIG_ACPI
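/* Map an IORT SMMU model ID onto the driver's version/implementation */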
1898static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
1899{
1900 int ret = 0;
1901
1902 switch (model) {
1903 case ACPI_IORT_SMMU_V1:
1904 case ACPI_IORT_SMMU_CORELINK_MMU400:
1905 smmu->version = ARM_SMMU_V1;
1906 smmu->model = GENERIC_SMMU;
1907 break;
Robin Murphy84c24372017-06-19 16:41:56 +01001908 case ACPI_IORT_SMMU_CORELINK_MMU401:
1909 smmu->version = ARM_SMMU_V1_64K;
1910 smmu->model = GENERIC_SMMU;
1911 break;
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00001912 case ACPI_IORT_SMMU_V2:
1913 smmu->version = ARM_SMMU_V2;
1914 smmu->model = GENERIC_SMMU;
1915 break;
1916 case ACPI_IORT_SMMU_CORELINK_MMU500:
1917 smmu->version = ARM_SMMU_V2;
1918 smmu->model = ARM_MMU500;
1919 break;
Robin Murphy84c24372017-06-19 16:41:56 +01001920 case ACPI_IORT_SMMU_CAVIUM_THUNDERX:
1921 smmu->version = ARM_SMMU_V2;
1922 smmu->model = CAVIUM_SMMUV2;
1923 break;
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00001924 default:
1925 ret = -ENODEV;
1926 }
1927
1928 return ret;
1929}
1930
1931static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
1932 struct arm_smmu_device *smmu)
1933{
1934 struct device *dev = smmu->dev;
1935 struct acpi_iort_node *node =
1936 *(struct acpi_iort_node **)dev_get_platdata(dev);
1937 struct acpi_iort_smmu *iort_smmu;
1938 int ret;
1939
1940 /* Retrieve SMMU1/2 specific data */
1941 iort_smmu = (struct acpi_iort_smmu *)node->node_data;
1942
1943 ret = acpi_smmu_get_data(iort_smmu->model, smmu);
1944 if (ret < 0)
1945 return ret;
1946
1947 /* Ignore the configuration access interrupt */
1948 smmu->num_global_irqs = 1;
1949
1950 if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
1951 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
1952
1953 return 0;
1954}
1955#else
1956static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
1957 struct arm_smmu_device *smmu)
1958{
1959 return -ENODEV;
1960}
1961#endif
1962
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001963static int arm_smmu_device_dt_probe(struct platform_device *pdev,
1964 struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001965{
Robin Murphy67b65a32016-04-13 18:12:57 +01001966 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001967 struct device *dev = &pdev->dev;
Robin Murphy021bb842016-09-14 15:26:46 +01001968 bool legacy_binding;
1969
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001970 if (of_property_read_u32(dev->of_node, "#global-interrupts",
1971 &smmu->num_global_irqs)) {
1972 dev_err(dev, "missing #global-interrupts property\n");
1973 return -ENODEV;
1974 }
1975
1976 data = of_device_get_match_data(dev);
1977 smmu->version = data->version;
1978 smmu->model = data->model;
1979
Robin Murphy021bb842016-09-14 15:26:46 +01001980 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
1981 if (legacy_binding && !using_generic_binding) {
1982 if (!using_legacy_binding)
1983 pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
1984 using_legacy_binding = true;
1985 } else if (!legacy_binding && !using_legacy_binding) {
1986 using_generic_binding = true;
1987 } else {
1988 dev_err(dev, "not probing due to mismatched DT properties\n");
1989 return -ENODEV;
1990 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001991
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001992 if (of_dma_is_coherent(dev->of_node))
1993 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
1994
1995 return 0;
1996}
1997
Robin Murphyf6810c12017-04-10 16:51:05 +05301998static void arm_smmu_bus_init(void)
1999{
2000 /* Oh, for a proper bus abstraction */
2001 if (!iommu_present(&platform_bus_type))
2002 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
2003#ifdef CONFIG_ARM_AMBA
2004 if (!iommu_present(&amba_bustype))
2005 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
2006#endif
2007#ifdef CONFIG_PCI
2008 if (!iommu_present(&pci_bus_type)) {
2009 pci_request_acs();
2010 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
2011 }
2012#endif
Nipun Guptaeab03e22018-09-10 19:19:18 +05302013#ifdef CONFIG_FSL_MC_BUS
2014 if (!iommu_present(&fsl_mc_bus_type))
2015 bus_set_iommu(&fsl_mc_bus_type, &arm_smmu_ops);
2016#endif
Robin Murphyf6810c12017-04-10 16:51:05 +05302017}
2018
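/*
 * Common probe path for DT and ACPI: map the register space, collect
 * global/context IRQs and clocks, probe the hardware configuration, reset
 * the SMMU and register it with the IOMMU core before wiring up the bus ops
 * (unless the legacy binding defers that to an initcall).
 */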
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002019static int arm_smmu_device_probe(struct platform_device *pdev)
2020{
2021 struct resource *res;
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002022 resource_size_t ioaddr;
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002023 struct arm_smmu_device *smmu;
2024 struct device *dev = &pdev->dev;
2025 int num_irqs, i, err;
2026
Will Deacon45ae7cf2013-06-24 18:31:25 +01002027 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
2028 if (!smmu) {
2029 dev_err(dev, "failed to allocate arm_smmu_device\n");
2030 return -ENOMEM;
2031 }
2032 smmu->dev = dev;
2033
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002034 if (dev->of_node)
2035 err = arm_smmu_device_dt_probe(pdev, smmu);
2036 else
2037 err = arm_smmu_device_acpi_probe(pdev, smmu);
2038
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002039 if (err)
2040 return err;
Robin Murphy09360402014-08-28 17:51:59 +01002041
Robin Murphyfc058d32019-08-15 19:37:33 +01002042 smmu = arm_smmu_impl_init(smmu);
2043 if (IS_ERR(smmu))
2044 return PTR_ERR(smmu);
2045
Will Deacon45ae7cf2013-06-24 18:31:25 +01002046 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002047 ioaddr = res->start;
Julia Lawall8a7f4312013-08-19 12:20:37 +01002048 smmu->base = devm_ioremap_resource(dev, res);
2049 if (IS_ERR(smmu->base))
2050 return PTR_ERR(smmu->base);
Robin Murphy490325e2019-08-15 19:37:26 +01002051 /*
2052 * The resource size should effectively match the value of SMMU_TOP;
2053 * stash that temporarily until we know PAGESIZE to validate it with.
2054 */
2055 smmu->numpage = resource_size(res);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002056
Will Deacon45ae7cf2013-06-24 18:31:25 +01002057 num_irqs = 0;
2058 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
2059 num_irqs++;
2060 if (num_irqs > smmu->num_global_irqs)
2061 smmu->num_context_irqs++;
2062 }
2063
Andreas Herrmann44a08de2013-10-01 13:39:07 +01002064 if (!smmu->num_context_irqs) {
2065 dev_err(dev, "found %d interrupts but expected at least %d\n",
2066 num_irqs, smmu->num_global_irqs + 1);
2067 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002068 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002069
Kees Cooka86854d2018-06-12 14:07:58 -07002070 smmu->irqs = devm_kcalloc(dev, num_irqs, sizeof(*smmu->irqs),
Will Deacon45ae7cf2013-06-24 18:31:25 +01002071 GFP_KERNEL);
2072 if (!smmu->irqs) {
2073 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
2074 return -ENOMEM;
2075 }
2076
2077 for (i = 0; i < num_irqs; ++i) {
2078 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07002079
Will Deacon45ae7cf2013-06-24 18:31:25 +01002080 if (irq < 0) {
2081 dev_err(dev, "failed to get irq index %d\n", i);
2082 return -ENODEV;
2083 }
2084 smmu->irqs[i] = irq;
2085 }
2086
Sricharan R96a299d2018-12-04 11:52:09 +05302087 err = devm_clk_bulk_get_all(dev, &smmu->clks);
2088 if (err < 0) {
2089 dev_err(dev, "failed to get clocks %d\n", err);
2090 return err;
2091 }
2092 smmu->num_clks = err;
2093
2094 err = clk_bulk_prepare_enable(smmu->num_clks, smmu->clks);
2095 if (err)
2096 return err;
2097
Olav Haugan3c8766d2014-08-22 17:12:32 -07002098 err = arm_smmu_device_cfg_probe(smmu);
2099 if (err)
2100 return err;
2101
Vivek Gautamd1e20222018-07-19 23:23:56 +05302102 if (smmu->version == ARM_SMMU_V2) {
2103 if (smmu->num_context_banks > smmu->num_context_irqs) {
2104 dev_err(dev,
2105 "found only %d context irq(s) but %d required\n",
2106 smmu->num_context_irqs, smmu->num_context_banks);
2107 return -ENODEV;
2108 }
2109
2110 /* Ignore superfluous interrupts */
2111 smmu->num_context_irqs = smmu->num_context_banks;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002112 }
2113
Will Deacon45ae7cf2013-06-24 18:31:25 +01002114 for (i = 0; i < smmu->num_global_irqs; ++i) {
Peng Fanbee14002016-07-04 17:38:22 +08002115 err = devm_request_irq(smmu->dev, smmu->irqs[i],
2116 arm_smmu_global_fault,
2117 IRQF_SHARED,
2118 "arm-smmu global fault",
2119 smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002120 if (err) {
2121 dev_err(dev, "failed to request global IRQ %d (%u)\n",
2122 i, smmu->irqs[i]);
Robin Murphyf80cd882016-09-14 15:21:39 +01002123 return err;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002124 }
2125 }
2126
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002127 err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
2128 "smmu.%pa", &ioaddr);
2129 if (err) {
2130 dev_err(dev, "Failed to register iommu in sysfs\n");
2131 return err;
2132 }
2133
2134 iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
2135 iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
2136
2137 err = iommu_device_register(&smmu->iommu);
2138 if (err) {
2139 dev_err(dev, "Failed to register iommu\n");
2140 return err;
2141 }
2142
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002143 platform_set_drvdata(pdev, smmu);
Will Deaconfd90cec2013-08-21 13:56:34 +01002144 arm_smmu_device_reset(smmu);
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03002145 arm_smmu_test_smr_masks(smmu);
Robin Murphy021bb842016-09-14 15:26:46 +01002146
Robin Murphyf6810c12017-04-10 16:51:05 +05302147 /*
Sricharan Rd4a44f02018-12-04 11:52:10 +05302148 * We want to avoid touching dev->power.lock in fastpaths unless
2149 * it's really going to do something useful - pm_runtime_enabled()
2150 * can serve as an ideal proxy for that decision. So, conditionally
2151 * enable pm_runtime.
2152 */
2153 if (dev->pm_domain) {
2154 pm_runtime_set_active(dev);
2155 pm_runtime_enable(dev);
2156 }
2157
2158 /*
Robin Murphyf6810c12017-04-10 16:51:05 +05302159 * For ACPI and generic DT bindings, an SMMU will be probed before
2160 * any device which might need it, so we want the bus ops in place
2161 * ready to handle default domain setup as soon as any SMMU exists.
2162 */
2163 if (!using_legacy_binding)
2164 arm_smmu_bus_init();
2165
Will Deacon45ae7cf2013-06-24 18:31:25 +01002166 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002167}
2168
Robin Murphyf6810c12017-04-10 16:51:05 +05302169/*
2170 * With the legacy DT binding in play, though, we have no guarantees about
2171 * probe order, but then we're also not doing default domains, so we can
2172 * delay setting bus ops until we're sure every possible SMMU is ready,
2173 * and that way ensure that no add_device() calls get missed.
2174 */
2175static int arm_smmu_legacy_bus_init(void)
2176{
2177 if (using_legacy_binding)
2178 arm_smmu_bus_init();
2179 return 0;
2180}
2181device_initcall_sync(arm_smmu_legacy_bus_init);
2182
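/*
 * On shutdown, warn if domains are still live, then disable the client ports
 * via sCR0.CLIENTPD ("turn the thing off") before tearing down clocks and
 * runtime PM state.
 */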
Paul Gortmakeraddb672f2018-12-01 14:19:16 -05002183static void arm_smmu_device_shutdown(struct platform_device *pdev)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002184{
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002185 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002186
2187 if (!smmu)
Paul Gortmakeraddb672f2018-12-01 14:19:16 -05002188 return;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002189
Will Deaconecfadb62013-07-31 19:21:28 +01002190 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002191 dev_err(&pdev->dev, "removing device with active domains!\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01002192
Sricharan Rd4a44f02018-12-04 11:52:10 +05302193 arm_smmu_rpm_get(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002194 /* Turn the thing off */
Robin Murphy00320ce2019-08-15 19:37:31 +01002195 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, sCR0_CLIENTPD);
Sricharan Rd4a44f02018-12-04 11:52:10 +05302196 arm_smmu_rpm_put(smmu);
Sricharan R96a299d2018-12-04 11:52:09 +05302197
Sricharan Rd4a44f02018-12-04 11:52:10 +05302198 if (pm_runtime_enabled(smmu->dev))
2199 pm_runtime_force_suspend(smmu->dev);
2200 else
2201 clk_bulk_disable(smmu->num_clks, smmu->clks);
2202
2203 clk_bulk_unprepare(smmu->num_clks, smmu->clks);
Nate Watterson7aa86192017-06-29 18:18:15 -04002204}
2205
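/*
 * Runtime PM: resume re-enables the bulk clocks and then reruns
 * arm_smmu_device_reset(); suspend simply gates the clocks again.
 */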
Sricharan R96a299d2018-12-04 11:52:09 +05302206static int __maybe_unused arm_smmu_runtime_resume(struct device *dev)
Robin Murphya2d866f2017-08-08 14:56:15 +01002207{
2208 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
Sricharan R96a299d2018-12-04 11:52:09 +05302209 int ret;
2210
2211 ret = clk_bulk_enable(smmu->num_clks, smmu->clks);
2212 if (ret)
2213 return ret;
Robin Murphya2d866f2017-08-08 14:56:15 +01002214
2215 arm_smmu_device_reset(smmu);
Sricharan R96a299d2018-12-04 11:52:09 +05302216
Will Deacon45ae7cf2013-06-24 18:31:25 +01002217 return 0;
2218}
2219
Sricharan R96a299d2018-12-04 11:52:09 +05302220static int __maybe_unused arm_smmu_runtime_suspend(struct device *dev)
Dan Carpenter6614ee72013-08-21 09:34:20 +01002221{
Sricharan R96a299d2018-12-04 11:52:09 +05302222 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
2223
2224 clk_bulk_disable(smmu->num_clks, smmu->clks);
2225
2226 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002227}
2228
Robin Murphya2d866f2017-08-08 14:56:15 +01002229static int __maybe_unused arm_smmu_pm_resume(struct device *dev)
2230{
Sricharan R96a299d2018-12-04 11:52:09 +05302231 if (pm_runtime_suspended(dev))
2232 return 0;
Robin Murphya2d866f2017-08-08 14:56:15 +01002233
Sricharan R96a299d2018-12-04 11:52:09 +05302234 return arm_smmu_runtime_resume(dev);
Robin Murphya2d866f2017-08-08 14:56:15 +01002235}
2236
Sricharan R96a299d2018-12-04 11:52:09 +05302237static int __maybe_unused arm_smmu_pm_suspend(struct device *dev)
2238{
2239 if (pm_runtime_suspended(dev))
2240 return 0;
2241
2242 return arm_smmu_runtime_suspend(dev);
2243}
2244
2245static const struct dev_pm_ops arm_smmu_pm_ops = {
2246 SET_SYSTEM_SLEEP_PM_OPS(arm_smmu_pm_suspend, arm_smmu_pm_resume)
2247 SET_RUNTIME_PM_OPS(arm_smmu_runtime_suspend,
2248 arm_smmu_runtime_resume, NULL)
2249};
Robin Murphya2d866f2017-08-08 14:56:15 +01002250
Will Deacon45ae7cf2013-06-24 18:31:25 +01002251static struct platform_driver arm_smmu_driver = {
2252 .driver = {
Paul Gortmakeraddb672f2018-12-01 14:19:16 -05002253 .name = "arm-smmu",
2254 .of_match_table = of_match_ptr(arm_smmu_of_match),
2255 .pm = &arm_smmu_pm_ops,
2256 .suppress_bind_attrs = true,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002257 },
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002258 .probe = arm_smmu_device_probe,
Nate Watterson7aa86192017-06-29 18:18:15 -04002259 .shutdown = arm_smmu_device_shutdown,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002260};
Paul Gortmakeraddb672f2018-12-01 14:19:16 -05002261builtin_platform_driver(arm_smmu_driver);