// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 *	- Extended Stream ID (16 bit)
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include <linux/amba/bus.h>
#include <linux/fsl/mc.h>

#include "arm-smmu.h"

/*
 * Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU
 * global register space are still, in fact, using a hypervisor to mediate it
 * by trapping and emulating register accesses. Sadly, some deployed versions
 * of said trapping code have bugs wherein they go horribly wrong for stores
 * using r31 (i.e. XZR/WZR) as the source register.
 */
#define QCOM_DUMMY_VAL -1

#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */
#define TLB_SPIN_COUNT			10

#define MSI_IOVA_BASE			0x8000000
#define MSI_IOVA_LENGTH			0x100000

static int force_stage;
/*
 * not really modular, but the easiest way to keep compat with existing
 * bootargs behaviour is to continue using module_param() here.
 */
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass =
	IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT);
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

struct arm_smmu_s2cr {
	struct iommu_group		*group;
	int				count;
	enum arm_smmu_s2cr_type		type;
	enum arm_smmu_s2cr_privcfg	privcfg;
	u8				cbndx;
};

#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
}

struct arm_smmu_smr {
	u16				mask;
	u16				id;
	bool				valid;
};

struct arm_smmu_cb {
	u64				ttbr[2];
	u32				tcr[2];
	u32				mair[2];
	struct arm_smmu_cfg		*cfg;
};

struct arm_smmu_master_cfg {
	struct arm_smmu_device		*smmu;
	s16				smendx[];
};
#define INVALID_SMENDX			-1
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
#define fwspec_smendx(fw, i) \
	(i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
#define for_each_cfg_sme(fw, i, idx) \
	for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)

static bool using_legacy_binding, using_generic_binding;

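/*
 * Runtime PM helpers: take/drop a reference on the SMMU's power domain
 * around register accesses, but only when runtime PM is actually enabled.
 */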
static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
{
	if (pm_runtime_enabled(smmu->dev))
		return pm_runtime_get_sync(smmu->dev);

	return 0;
}

static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
{
	if (pm_runtime_enabled(smmu->dev))
		pm_runtime_put(smmu->dev);
}

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}

static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", -1)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}

static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;

static int arm_smmu_register_legacy_master(struct device *dev,
					   struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err;

	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}

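/* Atomically claim the first free index in an allocation bitmap */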
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
				int sync, int status)
{
	unsigned int spin_cnt, delay;
	u32 reg;

	if (smmu->impl && unlikely(smmu->impl->tlb_sync))
		return smmu->impl->tlb_sync(smmu, page, sync, status);

	arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL);
	for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
		for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
			reg = arm_smmu_readl(smmu, page, status);
			if (!(reg & sTLBGSTATUS_GSACTIVE))
				return;
			cpu_relax();
		}
		udelay(delay);
	}
	dev_err_ratelimited(smmu->dev,
			    "TLB sync timed out -- SMMU may be deadlocked\n");
}

static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
{
	unsigned long flags;

	spin_lock_irqsave(&smmu->global_sync_lock, flags);
	__arm_smmu_tlb_sync(smmu, ARM_SMMU_GR0, ARM_SMMU_GR0_sTLBGSYNC,
			    ARM_SMMU_GR0_sTLBGSTATUS);
	spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
}

static void arm_smmu_tlb_sync_context(struct arm_smmu_domain *smmu_domain)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	unsigned long flags;

	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
	__arm_smmu_tlb_sync(smmu, ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx),
			    ARM_SMMU_CB_TLBSYNC, ARM_SMMU_CB_TLBSTATUS);
	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
}

static void arm_smmu_tlb_inv_context_s1(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	/*
	 * The TLBI write may be relaxed, so ensure that PTEs cleared by the
	 * current CPU are visible beforehand.
	 */
	wmb();
	arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx,
			  ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid);
	arm_smmu_tlb_sync_context(smmu_domain);
}

static void arm_smmu_tlb_inv_context_s2(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	/* See above */
	wmb();
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
	arm_smmu_tlb_sync_global(smmu);
}

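/*
 * Issue TLB invalidations for an IOVA range, one granule at a time, via the
 * given context bank invalidation register. Callers are responsible for any
 * subsequent TLB sync.
 */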
static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size,
				      size_t granule, void *cookie, int reg)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	int idx = cfg->cbndx;

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
		iova = (iova >> 12) << 12;
		iova |= cfg->asid;
		do {
			arm_smmu_cb_write(smmu, idx, reg, iova);
			iova += granule;
		} while (size -= granule);
	} else {
		iova >>= 12;
		iova |= (u64)cfg->asid << 48;
		do {
			arm_smmu_cb_writeq(smmu, idx, reg, iova);
			iova += granule >> 12;
		} while (size -= granule);
	}
}

static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
				      size_t granule, void *cookie, int reg)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	int idx = smmu_domain->cfg.cbndx;

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	iova >>= 12;
	do {
		if (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64)
			arm_smmu_cb_writeq(smmu, idx, reg, iova);
		else
			arm_smmu_cb_write(smmu, idx, reg, iova);
		iova += granule >> 12;
	} while (size -= granule);
}

static void arm_smmu_tlb_inv_walk_s1(unsigned long iova, size_t size,
				     size_t granule, void *cookie)
{
	arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie,
				  ARM_SMMU_CB_S1_TLBIVA);
	arm_smmu_tlb_sync_context(cookie);
}

static void arm_smmu_tlb_inv_leaf_s1(unsigned long iova, size_t size,
				     size_t granule, void *cookie)
{
	arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie,
				  ARM_SMMU_CB_S1_TLBIVAL);
	arm_smmu_tlb_sync_context(cookie);
}

static void arm_smmu_tlb_add_page_s1(struct iommu_iotlb_gather *gather,
				     unsigned long iova, size_t granule,
				     void *cookie)
{
	arm_smmu_tlb_inv_range_s1(iova, granule, granule, cookie,
				  ARM_SMMU_CB_S1_TLBIVAL);
}

static void arm_smmu_tlb_inv_walk_s2(unsigned long iova, size_t size,
				     size_t granule, void *cookie)
{
	arm_smmu_tlb_inv_range_s2(iova, size, granule, cookie,
				  ARM_SMMU_CB_S2_TLBIIPAS2);
	arm_smmu_tlb_sync_context(cookie);
}

static void arm_smmu_tlb_inv_leaf_s2(unsigned long iova, size_t size,
				     size_t granule, void *cookie)
{
	arm_smmu_tlb_inv_range_s2(iova, size, granule, cookie,
				  ARM_SMMU_CB_S2_TLBIIPAS2L);
	arm_smmu_tlb_sync_context(cookie);
}

static void arm_smmu_tlb_add_page_s2(struct iommu_iotlb_gather *gather,
				     unsigned long iova, size_t granule,
				     void *cookie)
{
	arm_smmu_tlb_inv_range_s2(iova, granule, granule, cookie,
				  ARM_SMMU_CB_S2_TLBIIPAS2L);
}

static void arm_smmu_tlb_inv_any_s2_v1(unsigned long iova, size_t size,
				       size_t granule, void *cookie)
{
	arm_smmu_tlb_inv_context_s2(cookie);
}
/*
 * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
 * almost negligible, but the benefit of getting the first one in as far ahead
 * of the sync as possible is significant, hence we don't just make this a
 * no-op and call arm_smmu_tlb_inv_context_s2() from .iotlb_sync as you might
 * think.
 */
static void arm_smmu_tlb_add_page_s2_v1(struct iommu_iotlb_gather *gather,
					unsigned long iova, size_t granule,
					void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
}

static const struct iommu_flush_ops arm_smmu_s1_tlb_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s1,
	.tlb_flush_walk	= arm_smmu_tlb_inv_walk_s1,
	.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf_s1,
	.tlb_add_page	= arm_smmu_tlb_add_page_s1,
};

static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
	.tlb_flush_walk	= arm_smmu_tlb_inv_walk_s2,
	.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf_s2,
	.tlb_add_page	= arm_smmu_tlb_add_page_s2,
};

static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
	.tlb_flush_walk	= arm_smmu_tlb_inv_any_s2_v1,
	.tlb_flush_leaf	= arm_smmu_tlb_inv_any_s2_v1,
	.tlb_add_page	= arm_smmu_tlb_add_page_s2_v1,
};

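/* Per-context fault IRQ handler: report the fault syndrome and clear the FSR */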
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	u32 fsr, fsynr, cbfrsynra;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	int idx = smmu_domain->cfg.cbndx;

	fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0);
	iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);
	cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));

	dev_err_ratelimited(smmu->dev,
	"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
			    fsr, iova, fsynr, cbfrsynra, idx);

	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;

	gfsr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
	gfsynr0 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, gfsr);
	return IRQ_HANDLED;
}

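/*
 * Fill in the software shadow of a context bank (TCR/TTBR/MAIR values) from
 * the io-pgtable configuration; the hardware is programmed separately.
 */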
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	cb->cfg = cfg;

	/* TCR */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr;
		} else {
			cb->tcr[0] = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
			cb->tcr[1] = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			cb->tcr[1] |= FIELD_PREP(TCR2_SEP, TCR2_SEP_UPSTREAM);
			if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
				cb->tcr[1] |= TCR2_AS;
		}
	} else {
		cb->tcr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	}

	/* TTBRs */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
			cb->ttbr[1] = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
		} else {
			cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
			cb->ttbr[0] |= FIELD_PREP(TTBRn_ASID, cfg->asid);
			cb->ttbr[1] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
			cb->ttbr[1] |= FIELD_PREP(TTBRn_ASID, cfg->asid);
		}
	} else {
		cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr;
			cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr;
		} else {
			cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
			cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		}
	}
}

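/* Program (or disable) a context bank in hardware from its software shadow */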
static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
{
	u32 reg;
	bool stage1;
	struct arm_smmu_cb *cb = &smmu->cbs[idx];
	struct arm_smmu_cfg *cfg = cb->cfg;

	/* Unassigned context banks only need disabling */
	if (!cfg) {
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, 0);
		return;
	}

	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	/* CBA2R */
	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_VA64;
		else
			reg = 0;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= FIELD_PREP(CBA2R_VMID16, cfg->vmid);

		arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBA2R(idx), reg);
	}

	/* CBAR */
	reg = FIELD_PREP(CBAR_TYPE, cfg->cbar);
	if (smmu->version < ARM_SMMU_V2)
		reg |= FIELD_PREP(CBAR_IRPTNDX, cfg->irptndx);

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= FIELD_PREP(CBAR_S1_BPSHCFG, CBAR_S1_BPSHCFG_NSH) |
			FIELD_PREP(CBAR_S1_MEMATTR, CBAR_S1_MEMATTR_WB);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= FIELD_PREP(CBAR_VMID, cfg->vmid);
	}
	arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(idx), reg);

	/*
	 * TCR
	 * We must write this before the TTBRs, since it determines the
	 * access behaviour of some fields (in particular, ASID[15:8]).
	 */
	if (stage1 && smmu->version > ARM_SMMU_V1)
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR2, cb->tcr[1]);
	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR, cb->tcr[0]);

	/* TTBRs */
	if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_CONTEXTIDR, cfg->asid);
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR1, cb->ttbr[1]);
	} else {
		arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
		if (stage1)
			arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR1,
					   cb->ttbr[1]);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR0, cb->mair[0]);
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR1, cb->mair[1]);
	}

	/* SCTLR */
	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= SCTLR_E;

	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
}

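/*
 * Finalise a domain on a particular SMMU: pick a translation stage and
 * context format, allocate a context bank, build the page tables and
 * request the context fault IRQ.
 */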
static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
		smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested	Supported	Actual
	 *     S1		N	   S1
	 *     S1	      S1+S2	   S1
	 *     S1		S2	   S2
	 *     S1		S1	   S1
	 *     N		N	   N
	 *     N	      S1+S2	   S2
	 *     N		S2	   S2
	 *     N		S1	   S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
	    !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
	    (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		} else {
			fmt = ARM_V7S;
			ias = min(ias, 32UL);
			oas = min(oas, 32UL);
		}
		smmu_domain->flush_ops = &arm_smmu_s1_tlb_ops;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		if (smmu->version == ARM_SMMU_V2)
			smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v2;
		else
			smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v1;
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}
	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (ret < 0)
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
		cfg->vmid = cfg->cbndx + 1;
	else
		cfg->asid = cfg->cbndx;

	smmu_domain->smmu = smmu;
	if (smmu->impl && smmu->impl->init_context) {
		ret = smmu->impl->init_context(smmu_domain);
		if (ret)
			goto out_unlock;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.coherent_walk	= smmu->features & ARM_SMMU_FEAT_COHERENT_WALK,
		.tlb		= smmu_domain->flush_ops,
		.iommu_dev	= smmu->dev,
	};

	if (smmu_domain->non_strict)
		pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;

	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1UL << ias) - 1;
	domain->geometry.force_aperture = true;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
			       IRQF_SHARED, "arm-smmu-context-fault", domain);
	if (ret < 0) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

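/* Tear down a finalised domain: disable its context bank and free resources */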
static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	int ret, irq;

	if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
		return;

	ret = arm_smmu_rpm_get(smmu);
	if (ret < 0)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	smmu->cbs[cfg->cbndx].cfg = NULL;
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);

	arm_smmu_rpm_put(smmu);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED &&
	    type != IOMMU_DOMAIN_DMA &&
	    type != IOMMU_DOMAIN_IDENTITY)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
	    iommu_get_dma_cookie(&smmu_domain->domain))) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->cb_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

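/* Program a Stream Match Register from its software copy */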
static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_smr *smr = smmu->smrs + idx;
	u32 reg = FIELD_PREP(SMR_ID, smr->id) | FIELD_PREP(SMR_MASK, smr->mask);

	if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
		reg |= SMR_VALID;
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(idx), reg);
}

static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
	u32 reg = FIELD_PREP(S2CR_TYPE, s2cr->type) |
		  FIELD_PREP(S2CR_CBNDX, s2cr->cbndx) |
		  FIELD_PREP(S2CR_PRIVCFG, s2cr->privcfg);

	if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
	    smmu->smrs[idx].valid)
		reg |= S2CR_EXIDVALID;
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
}

static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
{
	arm_smmu_write_s2cr(smmu, idx);
	if (smmu->smrs)
		arm_smmu_write_smr(smmu, idx);
}

/*
 * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
 * should be called after sCR0 is written.
 */
static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
{
	u32 smr;

	if (!smmu->smrs)
		return;

	/*
	 * SMR.ID bits may not be preserved if the corresponding MASK
	 * bits are set, so check each one separately. We can reject
	 * masters later if they try to claim IDs outside these masks.
	 */
	smr = FIELD_PREP(SMR_ID, smmu->streamid_mask);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(0), smr);
	smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(0));
	smmu->streamid_mask = FIELD_GET(SMR_ID, smr);

	smr = FIELD_PREP(SMR_MASK, smmu->streamid_mask);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(0), smr);
	smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(0));
	smmu->smr_mask_mask = FIELD_GET(SMR_MASK, smr);
}

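/*
 * Find a stream map entry for the given ID/mask, reusing a compatible
 * existing SMR where possible and rejecting conflicting overlaps.
 */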
static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
{
	struct arm_smmu_smr *smrs = smmu->smrs;
	int i, free_idx = -ENOSPC;

	/* Stream indexing is blissfully easy */
	if (!smrs)
		return id;

	/* Validating SMRs is... less so */
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		if (!smrs[i].valid) {
			/*
			 * Note the first free entry we come across, which
			 * we'll claim in the end if nothing else matches.
			 */
			if (free_idx < 0)
				free_idx = i;
			continue;
		}
		/*
		 * If the new entry is _entirely_ matched by an existing entry,
		 * then reuse that, with the guarantee that there also cannot
		 * be any subsequent conflicting entries. In normal use we'd
		 * expect simply identical entries for this case, but there's
		 * no harm in accommodating the generalisation.
		 */
		if ((mask & smrs[i].mask) == mask &&
		    !((id ^ smrs[i].id) & ~smrs[i].mask))
			return i;
		/*
		 * If the new entry has any other overlap with an existing one,
		 * though, then there always exists at least one stream ID
		 * which would cause a conflict, and we can't allow that risk.
		 */
		if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
			return -EINVAL;
	}

	return free_idx;
}

static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
{
	if (--smmu->s2crs[idx].count)
		return false;

	smmu->s2crs[idx] = s2cr_init_val;
	if (smmu->smrs)
		smmu->smrs[idx].valid = false;

	return true;
}

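/* Allocate and program stream map entries for all of a master's stream IDs */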
static int arm_smmu_master_alloc_smes(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
	struct arm_smmu_device *smmu = cfg->smmu;
	struct arm_smmu_smr *smrs = smmu->smrs;
	struct iommu_group *group;
	int i, idx, ret;

	mutex_lock(&smmu->stream_map_mutex);
	/* Figure out a viable stream map entry allocation */
	for_each_cfg_sme(fwspec, i, idx) {
		u16 sid = FIELD_GET(SMR_ID, fwspec->ids[i]);
		u16 mask = FIELD_GET(SMR_MASK, fwspec->ids[i]);

		if (idx != INVALID_SMENDX) {
			ret = -EEXIST;
			goto out_err;
		}

		ret = arm_smmu_find_sme(smmu, sid, mask);
		if (ret < 0)
			goto out_err;

		idx = ret;
		if (smrs && smmu->s2crs[idx].count == 0) {
			smrs[idx].id = sid;
			smrs[idx].mask = mask;
			smrs[idx].valid = true;
		}
		smmu->s2crs[idx].count++;
		cfg->smendx[i] = (s16)idx;
	}

	group = iommu_group_get_for_dev(dev);
	if (!group)
		group = ERR_PTR(-ENOMEM);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto out_err;
	}
	iommu_group_put(group);

	/* It worked! Now, poke the actual hardware */
	for_each_cfg_sme(fwspec, i, idx) {
		arm_smmu_write_sme(smmu, idx);
		smmu->s2crs[idx].group = group;
	}

	mutex_unlock(&smmu->stream_map_mutex);
	return 0;

out_err:
	while (i--) {
		arm_smmu_free_sme(smmu, cfg->smendx[i]);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
	return ret;
}

static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
{
	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
	int i, idx;

	mutex_lock(&smmu->stream_map_mutex);
	for_each_cfg_sme(fwspec, i, idx) {
		if (arm_smmu_free_sme(smmu, idx))
			arm_smmu_write_sme(smmu, idx);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
}

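/* Point the master's S2CRs at the domain's context bank (or bypass) */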
static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct iommu_fwspec *fwspec)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s2cr *s2cr = smmu->s2crs;
	u8 cbndx = smmu_domain->cfg.cbndx;
	enum arm_smmu_s2cr_type type;
	int i, idx;

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
		type = S2CR_TYPE_BYPASS;
	else
		type = S2CR_TYPE_TRANS;

	for_each_cfg_sme(fwspec, i, idx) {
		if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
			continue;

		s2cr[idx].type = type;
		s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
		s2cr[idx].cbndx = cbndx;
		arm_smmu_write_s2cr(smmu, idx);
	}
	return 0;
}

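/* Attach a device to a domain, finalising the domain on this SMMU if needed */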
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct arm_smmu_device *smmu;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (!fwspec || fwspec->ops != &arm_smmu_ops) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/*
	 * FIXME: The arch/arm DMA API code tries to attach devices to its own
	 * domains between of_xlate() and add_device() - we have no way to cope
	 * with that, so until ARM gets converted to rely on groups and default
	 * domains, just say no (but more politely than by dereferencing NULL).
	 * This should be at least a WARN_ON once that's sorted.
	 */
	if (!fwspec->iommu_priv)
		return -ENODEV;

	smmu = fwspec_smmu(fwspec);

	ret = arm_smmu_rpm_get(smmu);
	if (ret < 0)
		return ret;

	/* Ensure that the domain is finalised */
	ret = arm_smmu_init_domain_context(domain, smmu);
	if (ret < 0)
		goto rpm_put;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different SMMUs.
	 */
	if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
		ret = -EINVAL;
		goto rpm_put;
	}

	/* Looks ok, so add the device to the domain */
	ret = arm_smmu_domain_add_master(smmu_domain, fwspec);

rpm_put:
	arm_smmu_rpm_put(smmu);
	return ret;
}

static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
	struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
	int ret;

	if (!ops)
		return -ENODEV;

	arm_smmu_rpm_get(smmu);
	ret = ops->map(ops, iova, paddr, size, prot);
	arm_smmu_rpm_put(smmu);

	return ret;
}

static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size, struct iommu_iotlb_gather *gather)
{
	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
	struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
	size_t ret;

	if (!ops)
		return 0;

	arm_smmu_rpm_get(smmu);
	ret = ops->unmap(ops, iova, size, gather);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301195 arm_smmu_rpm_put(smmu);
1196
1197 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001198}
1199
Robin Murphy44f68762018-09-20 17:10:27 +01001200static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
1201{
1202 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301203 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy44f68762018-09-20 17:10:27 +01001204
Will Deaconabfd6fe2019-07-02 16:44:41 +01001205 if (smmu_domain->flush_ops) {
Sricharan Rd4a44f02018-12-04 11:52:10 +05301206 arm_smmu_rpm_get(smmu);
Robin Murphy696bcfb2019-09-18 17:17:51 +01001207 smmu_domain->flush_ops->tlb_flush_all(smmu_domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301208 arm_smmu_rpm_put(smmu);
1209 }
Robin Murphy44f68762018-09-20 17:10:27 +01001210}
1211
Will Deacon56f8af52019-07-02 16:44:06 +01001212static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
1213 struct iommu_iotlb_gather *gather)
Robin Murphy32b12442017-09-28 15:55:01 +01001214{
1215 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301216 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy32b12442017-09-28 15:55:01 +01001217
Robin Murphyae2b60f2019-09-18 17:17:50 +01001218 if (!smmu)
1219 return;
1220
1221 arm_smmu_rpm_get(smmu);
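	/*
	 * Stage 2 invalidation on SMMUv1 goes through the global TLBIVMID
	 * register and therefore needs a global sync; SMMUv2 and stage 1
	 * contexts invalidate via their context bank, so the lighter
	 * per-context sync suffices.
	 */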
1222 if (smmu->version == ARM_SMMU_V2 ||
1223 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
1224 arm_smmu_tlb_sync_context(smmu_domain);
1225 else
1226 arm_smmu_tlb_sync_global(smmu);
1227 arm_smmu_rpm_put(smmu);
Robin Murphy32b12442017-09-28 15:55:01 +01001228}
1229
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001230static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
1231 dma_addr_t iova)
1232{
Joerg Roedel1d672632015-03-26 13:43:10 +01001233 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001234 struct arm_smmu_device *smmu = smmu_domain->smmu;
1235 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1236 struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
1237 struct device *dev = smmu->dev;
Robin Murphy19713fd2019-08-15 19:37:30 +01001238 void __iomem *reg;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001239 u32 tmp;
1240 u64 phys;
Robin Murphy523d7422017-06-22 16:53:56 +01001241 unsigned long va, flags;
Robin Murphy19713fd2019-08-15 19:37:30 +01001242 int ret, idx = cfg->cbndx;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301243
1244 ret = arm_smmu_rpm_get(smmu);
1245 if (ret < 0)
1246 return 0;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001247
Robin Murphy523d7422017-06-22 16:53:56 +01001248 spin_lock_irqsave(&smmu_domain->cb_lock, flags);
Robin Murphy661d9622015-05-27 17:09:34 +01001249 va = iova & ~0xfffUL;
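	/*
	 * Writing the page-aligned address to ATS1PR asks the SMMU to perform
	 * a stage 1 privileged-read translation itself; completion is polled
	 * via ATSR below and the result (or fault) is read back from PAR.
	 */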
Robin Murphy61005762019-08-15 19:37:28 +01001250 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
Robin Murphy19713fd2019-08-15 19:37:30 +01001251 arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
Robin Murphy61005762019-08-15 19:37:28 +01001252 else
Robin Murphy19713fd2019-08-15 19:37:30 +01001253 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001254
Robin Murphy19713fd2019-08-15 19:37:30 +01001255 reg = arm_smmu_page(smmu, ARM_SMMU_CB(smmu, idx)) + ARM_SMMU_CB_ATSR;
1256 if (readl_poll_timeout_atomic(reg, tmp, !(tmp & ATSR_ACTIVE), 5, 50)) {
Robin Murphy523d7422017-06-22 16:53:56 +01001257 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001258 dev_err(dev,
Fabio Estevam077124c2015-08-18 17:12:24 +01001259 "iova to phys timed out on %pad. Falling back to software table walk.\n",
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001260 &iova);
		/* Don't leak the runtime PM reference on the fallback path */
		arm_smmu_rpm_put(smmu);
1261		return ops->iova_to_phys(ops, iova);
1262 }
1263
Robin Murphy19713fd2019-08-15 19:37:30 +01001264 phys = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_PAR);
Robin Murphy523d7422017-06-22 16:53:56 +01001265 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001266 if (phys & CB_PAR_F) {
1267 dev_err(dev, "translation fault!\n");
1268 dev_err(dev, "PAR = 0x%llx\n", phys);
		arm_smmu_rpm_put(smmu);
1269		return 0;
1270 }
1271
Sricharan Rd4a44f02018-12-04 11:52:10 +05301272 arm_smmu_rpm_put(smmu);
1273
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001274 return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
1275}
1276
Will Deacon45ae7cf2013-06-24 18:31:25 +01001277static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001278 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001279{
Joerg Roedel1d672632015-03-26 13:43:10 +01001280 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Robin Murphy523d7422017-06-22 16:53:56 +01001281 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001282
Sunil Gouthambdf95922017-04-25 15:27:52 +05301283 if (domain->type == IOMMU_DOMAIN_IDENTITY)
1284 return iova;
1285
Will Deacon518f7132014-11-14 17:17:54 +00001286 if (!ops)
Will Deacona44a97912013-11-07 18:47:50 +00001287 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001288
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001289 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
Robin Murphy523d7422017-06-22 16:53:56 +01001290 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
1291 return arm_smmu_iova_to_phys_hard(domain, iova);
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001292
Robin Murphy523d7422017-06-22 16:53:56 +01001293 return ops->iova_to_phys(ops, iova);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001294}
1295
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001296static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001297{
Will Deacond0948942014-06-24 17:30:10 +01001298 switch (cap) {
1299 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001300 /*
1301 * Return true here as the SMMU can always send out coherent
1302 * requests.
1303 */
1304 return true;
Antonios Motakis0029a8d2014-10-13 14:06:18 +01001305 case IOMMU_CAP_NOEXEC:
1306 return true;
Will Deacond0948942014-06-24 17:30:10 +01001307 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001308 return false;
Will Deacond0948942014-06-24 17:30:10 +01001309 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001310}
Will Deacon45ae7cf2013-06-24 18:31:25 +01001311
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001312static
1313struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
Robin Murphy021bb842016-09-14 15:26:46 +01001314{
Suzuki K Poulose67843bb2019-07-23 23:18:34 +01001315 struct device *dev = driver_find_device_by_fwnode(&arm_smmu_driver.driver,
1316 fwnode);
Robin Murphy021bb842016-09-14 15:26:46 +01001317 put_device(dev);
1318 return dev ? dev_get_drvdata(dev) : NULL;
1319}
1320
Will Deacon03edb222015-01-19 14:27:33 +00001321static int arm_smmu_add_device(struct device *dev)
1322{
Robin Murphyadfec2e2016-09-12 17:13:55 +01001323 struct arm_smmu_device *smmu;
Robin Murphyf80cd882016-09-14 15:21:39 +01001324 struct arm_smmu_master_cfg *cfg;
Joerg Roedel9b468f72018-11-29 14:01:00 +01001325 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphyf80cd882016-09-14 15:21:39 +01001326 int i, ret;
1327
Robin Murphy021bb842016-09-14 15:26:46 +01001328 if (using_legacy_binding) {
1329 ret = arm_smmu_register_legacy_master(dev, &smmu);
Artem Savkova7990c62017-08-08 12:26:02 +02001330
1331 /*
1332	 * If dev->iommu_fwspec is initially NULL, arm_smmu_register_legacy_master()
1333 * will allocate/initialise a new one. Thus we need to update fwspec for
1334 * later use.
1335 */
Joerg Roedel9b468f72018-11-29 14:01:00 +01001336 fwspec = dev_iommu_fwspec_get(dev);
Robin Murphy021bb842016-09-14 15:26:46 +01001337 if (ret)
1338 goto out_free;
Robin Murphy3c117b52016-11-02 17:31:32 +00001339 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001340 smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
Robin Murphy021bb842016-09-14 15:26:46 +01001341 } else {
1342 return -ENODEV;
1343 }
Robin Murphyf80cd882016-09-14 15:21:39 +01001344
1345 ret = -EINVAL;
Robin Murphyadfec2e2016-09-12 17:13:55 +01001346 for (i = 0; i < fwspec->num_ids; i++) {
Robin Murphy0caf5f42019-08-15 19:37:23 +01001347 u16 sid = FIELD_GET(SMR_ID, fwspec->ids[i]);
1348 u16 mask = FIELD_GET(SMR_MASK, fwspec->ids[i]);
Robin Murphyf80cd882016-09-14 15:21:39 +01001349
Robin Murphyadfec2e2016-09-12 17:13:55 +01001350 if (sid & ~smmu->streamid_mask) {
Robin Murphyf80cd882016-09-14 15:21:39 +01001351 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
Robin Murphy021bb842016-09-14 15:26:46 +01001352 sid, smmu->streamid_mask);
1353 goto out_free;
1354 }
1355 if (mask & ~smmu->smr_mask_mask) {
1356 dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
Peng Fan6323f472017-04-21 17:03:36 +08001357 mask, smmu->smr_mask_mask);
Robin Murphyf80cd882016-09-14 15:21:39 +01001358 goto out_free;
1359 }
Robin Murphyf80cd882016-09-14 15:21:39 +01001360 }
Will Deacon03edb222015-01-19 14:27:33 +00001361
Robin Murphyadfec2e2016-09-12 17:13:55 +01001362 ret = -ENOMEM;
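	/*
	 * At this point i == fwspec->num_ids, so the offsetof() below sizes
	 * the cfg with one smendx[] slot per stream ID of this master.
	 */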
1363 cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
1364 GFP_KERNEL);
1365 if (!cfg)
1366 goto out_free;
1367
1368 cfg->smmu = smmu;
1369 fwspec->iommu_priv = cfg;
1370 while (i--)
1371 cfg->smendx[i] = INVALID_SMENDX;
1372
Sricharan Rd4a44f02018-12-04 11:52:10 +05301373 ret = arm_smmu_rpm_get(smmu);
1374 if (ret < 0)
1375 goto out_cfg_free;
1376
Robin Murphy588888a2016-09-12 17:13:54 +01001377 ret = arm_smmu_master_alloc_smes(dev);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301378 arm_smmu_rpm_put(smmu);
1379
Robin Murphyadfec2e2016-09-12 17:13:55 +01001380 if (ret)
Vivek Gautamc54451a2017-07-06 15:07:00 +05301381 goto out_cfg_free;
Robin Murphyadfec2e2016-09-12 17:13:55 +01001382
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001383 iommu_device_link(&smmu->iommu, dev);
1384
Sricharan R655e3642018-12-04 11:52:11 +05301385 device_link_add(dev, smmu->dev,
1386 DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER);
1387
Robin Murphyadfec2e2016-09-12 17:13:55 +01001388 return 0;
Robin Murphyf80cd882016-09-14 15:21:39 +01001389
Vivek Gautamc54451a2017-07-06 15:07:00 +05301390out_cfg_free:
1391 kfree(cfg);
Robin Murphyf80cd882016-09-14 15:21:39 +01001392out_free:
Robin Murphyadfec2e2016-09-12 17:13:55 +01001393 iommu_fwspec_free(dev);
Robin Murphyf80cd882016-09-14 15:21:39 +01001394 return ret;
Will Deacon03edb222015-01-19 14:27:33 +00001395}
1396
Will Deacon45ae7cf2013-06-24 18:31:25 +01001397static void arm_smmu_remove_device(struct device *dev)
1398{
Joerg Roedel9b468f72018-11-29 14:01:00 +01001399 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001400 struct arm_smmu_master_cfg *cfg;
1401 struct arm_smmu_device *smmu;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301402 int ret;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001403
Robin Murphyadfec2e2016-09-12 17:13:55 +01001404 if (!fwspec || fwspec->ops != &arm_smmu_ops)
Robin Murphyf80cd882016-09-14 15:21:39 +01001405 return;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001406
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001407 cfg = fwspec->iommu_priv;
1408 smmu = cfg->smmu;
1409
Sricharan Rd4a44f02018-12-04 11:52:10 +05301410 ret = arm_smmu_rpm_get(smmu);
1411 if (ret < 0)
1412 return;
1413
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001414 iommu_device_unlink(&smmu->iommu, dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001415 arm_smmu_master_free_smes(fwspec);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301416
1417 arm_smmu_rpm_put(smmu);
1418
Antonios Motakis5fc63a72013-10-18 16:08:29 +01001419 iommu_group_remove_device(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001420 kfree(fwspec->iommu_priv);
1421 iommu_fwspec_free(dev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001422}
1423
Joerg Roedelaf659932015-10-21 23:51:41 +02001424static struct iommu_group *arm_smmu_device_group(struct device *dev)
1425{
Joerg Roedel9b468f72018-11-29 14:01:00 +01001426 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001427 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
Robin Murphy588888a2016-09-12 17:13:54 +01001428 struct iommu_group *group = NULL;
1429 int i, idx;
1430
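	/*
	 * If any of this master's stream map entries have already been
	 * claimed by a group (e.g. because its stream IDs alias with another
	 * device's), then all of them must agree on that group.
	 */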
Robin Murphyadfec2e2016-09-12 17:13:55 +01001431 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy588888a2016-09-12 17:13:54 +01001432 if (group && smmu->s2crs[idx].group &&
1433 group != smmu->s2crs[idx].group)
1434 return ERR_PTR(-EINVAL);
1435
1436 group = smmu->s2crs[idx].group;
1437 }
1438
1439 if (group)
Robin Murphye1b44cb2016-11-11 17:59:22 +00001440 return iommu_group_ref_get(group);
Joerg Roedelaf659932015-10-21 23:51:41 +02001441
1442 if (dev_is_pci(dev))
1443 group = pci_device_group(dev);
Nipun Guptaeab03e22018-09-10 19:19:18 +05301444 else if (dev_is_fsl_mc(dev))
1445 group = fsl_mc_device_group(dev);
Joerg Roedelaf659932015-10-21 23:51:41 +02001446 else
1447 group = generic_device_group(dev);
1448
Joerg Roedelaf659932015-10-21 23:51:41 +02001449 return group;
1450}
1451
Will Deaconc752ce42014-06-25 22:46:31 +01001452static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
1453 enum iommu_attr attr, void *data)
1454{
Joerg Roedel1d672632015-03-26 13:43:10 +01001455 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001456
Robin Murphy44f68762018-09-20 17:10:27 +01001457	switch (domain->type) {
1458 case IOMMU_DOMAIN_UNMANAGED:
1459 switch (attr) {
1460 case DOMAIN_ATTR_NESTING:
1461 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
1462 return 0;
1463 default:
1464 return -ENODEV;
1465 }
1466 break;
1467 case IOMMU_DOMAIN_DMA:
1468 switch (attr) {
1469 case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
1470 *(int *)data = smmu_domain->non_strict;
1471 return 0;
1472 default:
1473 return -ENODEV;
1474 }
1475 break;
Will Deaconc752ce42014-06-25 22:46:31 +01001476 default:
Robin Murphy44f68762018-09-20 17:10:27 +01001477 return -EINVAL;
Will Deaconc752ce42014-06-25 22:46:31 +01001478 }
1479}
1480
1481static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
1482 enum iommu_attr attr, void *data)
1483{
Will Deacon518f7132014-11-14 17:17:54 +00001484 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01001485 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001486
Will Deacon518f7132014-11-14 17:17:54 +00001487 mutex_lock(&smmu_domain->init_mutex);
1488
Robin Murphy44f68762018-09-20 17:10:27 +01001489	switch (domain->type) {
1490 case IOMMU_DOMAIN_UNMANAGED:
1491 switch (attr) {
1492 case DOMAIN_ATTR_NESTING:
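			/*
			 * The stage can only be chosen before the domain has
			 * been attached and its context finalised on an SMMU.
			 */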
1493 if (smmu_domain->smmu) {
1494 ret = -EPERM;
1495 goto out_unlock;
1496 }
1497
1498 if (*(int *)data)
1499 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1500 else
1501 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1502 break;
1503 default:
1504 ret = -ENODEV;
Will Deacon518f7132014-11-14 17:17:54 +00001505 }
Robin Murphy44f68762018-09-20 17:10:27 +01001506 break;
1507 case IOMMU_DOMAIN_DMA:
1508 switch (attr) {
1509 case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
1510 smmu_domain->non_strict = *(int *)data;
1511 break;
1512 default:
1513 ret = -ENODEV;
1514 }
Will Deacon518f7132014-11-14 17:17:54 +00001515 break;
Will Deaconc752ce42014-06-25 22:46:31 +01001516 default:
Robin Murphy44f68762018-09-20 17:10:27 +01001517 ret = -EINVAL;
Will Deaconc752ce42014-06-25 22:46:31 +01001518 }
Will Deacon518f7132014-11-14 17:17:54 +00001519out_unlock:
1520 mutex_unlock(&smmu_domain->init_mutex);
1521 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01001522}
1523
Robin Murphy021bb842016-09-14 15:26:46 +01001524static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
1525{
Robin Murphy56fbf602017-03-31 12:03:33 +01001526 u32 mask, fwid = 0;
Robin Murphy021bb842016-09-14 15:26:46 +01001527
1528 if (args->args_count > 0)
Robin Murphy0caf5f42019-08-15 19:37:23 +01001529 fwid |= FIELD_PREP(SMR_ID, args->args[0]);
Robin Murphy021bb842016-09-14 15:26:46 +01001530
1531 if (args->args_count > 1)
Robin Murphy0caf5f42019-08-15 19:37:23 +01001532 fwid |= FIELD_PREP(SMR_MASK, args->args[1]);
Robin Murphy56fbf602017-03-31 12:03:33 +01001533 else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
Robin Murphy0caf5f42019-08-15 19:37:23 +01001534 fwid |= FIELD_PREP(SMR_MASK, mask);
Robin Murphy021bb842016-09-14 15:26:46 +01001535
1536 return iommu_fwspec_add_ids(dev, &fwid, 1);
1537}
1538
Eric Augerf3ebee82017-01-19 20:57:55 +00001539static void arm_smmu_get_resv_regions(struct device *dev,
1540 struct list_head *head)
1541{
1542 struct iommu_resv_region *region;
1543 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
1544
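	/*
	 * Advertise a software-managed MSI window so that MSI doorbells can
	 * be mapped at a fixed IOVA range when translation is enabled; any
	 * firmware-described reserved regions are appended below.
	 */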
1545 region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
Robin Murphy9d3a4de2017-03-16 17:00:16 +00001546 prot, IOMMU_RESV_SW_MSI);
Eric Augerf3ebee82017-01-19 20:57:55 +00001547 if (!region)
1548 return;
1549
1550 list_add_tail(&region->list, head);
Robin Murphy273df962017-03-16 17:00:19 +00001551
1552 iommu_dma_get_resv_regions(dev, head);
Eric Augerf3ebee82017-01-19 20:57:55 +00001553}
1554
1555static void arm_smmu_put_resv_regions(struct device *dev,
1556 struct list_head *head)
1557{
1558 struct iommu_resv_region *entry, *next;
1559
1560 list_for_each_entry_safe(entry, next, head, list)
1561 kfree(entry);
1562}
1563
Will Deacon518f7132014-11-14 17:17:54 +00001564static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01001565 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01001566 .domain_alloc = arm_smmu_domain_alloc,
1567 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01001568 .attach_dev = arm_smmu_attach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01001569 .map = arm_smmu_map,
1570 .unmap = arm_smmu_unmap,
Robin Murphy44f68762018-09-20 17:10:27 +01001571 .flush_iotlb_all = arm_smmu_flush_iotlb_all,
Robin Murphy32b12442017-09-28 15:55:01 +01001572 .iotlb_sync = arm_smmu_iotlb_sync,
Will Deaconc752ce42014-06-25 22:46:31 +01001573 .iova_to_phys = arm_smmu_iova_to_phys,
1574 .add_device = arm_smmu_add_device,
1575 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02001576 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01001577 .domain_get_attr = arm_smmu_domain_get_attr,
1578 .domain_set_attr = arm_smmu_domain_set_attr,
Robin Murphy021bb842016-09-14 15:26:46 +01001579 .of_xlate = arm_smmu_of_xlate,
Eric Augerf3ebee82017-01-19 20:57:55 +00001580 .get_resv_regions = arm_smmu_get_resv_regions,
1581 .put_resv_regions = arm_smmu_put_resv_regions,
Will Deacon518f7132014-11-14 17:17:54 +00001582 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001583};
1584
1585static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
1586{
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001587 int i;
Robin Murphy62b993a2019-08-15 19:37:36 +01001588 u32 reg;
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001589
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001590 /* clear global FSR */
Robin Murphy00320ce2019-08-15 19:37:31 +01001591 reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
1592 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, reg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001593
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001594 /*
1595 * Reset stream mapping groups: Initial values mark all SMRn as
1596 * invalid and all S2CRn as bypass unless overridden.
1597 */
Robin Murphy8e8b2032016-09-12 17:13:50 +01001598 for (i = 0; i < smmu->num_mapping_groups; ++i)
1599 arm_smmu_write_sme(smmu, i);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001600
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001601 /* Make sure all context banks are disabled and clear CB_FSR */
1602 for (i = 0; i < smmu->num_context_banks; ++i) {
Robin Murphy90df3732017-08-08 14:56:14 +01001603 arm_smmu_write_context_bank(smmu, i);
Robin Murphy19713fd2019-08-15 19:37:30 +01001604 arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_FSR, FSR_FAULT);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001605 }
Will Deacon1463fe42013-07-31 19:21:27 +01001606
Will Deacon45ae7cf2013-06-24 18:31:25 +01001607 /* Invalidate the TLB, just in case */
Robin Murphy00320ce2019-08-15 19:37:31 +01001608 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLH, QCOM_DUMMY_VAL);
1609 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLNSNH, QCOM_DUMMY_VAL);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001610
Robin Murphy00320ce2019-08-15 19:37:31 +01001611 reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001612
Will Deacon45ae7cf2013-06-24 18:31:25 +01001613 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001614 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001615
1616 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001617 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001618
Robin Murphy25a1c962016-02-10 14:25:33 +00001619 /* Enable client access, handling unmatched streams as appropriate */
1620 reg &= ~sCR0_CLIENTPD;
1621 if (disable_bypass)
1622 reg |= sCR0_USFCFG;
1623 else
1624 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001625
1626 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001627 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001628
1629 /* Don't upgrade barriers */
Robin Murphy0caf5f42019-08-15 19:37:23 +01001630 reg &= ~(sCR0_BSU);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001631
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001632 if (smmu->features & ARM_SMMU_FEAT_VMID16)
1633 reg |= sCR0_VMID16EN;
1634
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001635 if (smmu->features & ARM_SMMU_FEAT_EXIDS)
1636 reg |= sCR0_EXIDENABLE;
1637
Robin Murphy62b993a2019-08-15 19:37:36 +01001638 if (smmu->impl && smmu->impl->reset)
1639 smmu->impl->reset(smmu);
1640
Will Deacon45ae7cf2013-06-24 18:31:25 +01001641 /* Push the button */
Robin Murphy11febfc2017-03-30 17:56:31 +01001642 arm_smmu_tlb_sync_global(smmu);
Robin Murphy00320ce2019-08-15 19:37:31 +01001643 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, reg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001644}
1645
1646static int arm_smmu_id_size_to_bits(int size)
1647{
1648 switch (size) {
1649 case 0:
1650 return 32;
1651 case 1:
1652 return 36;
1653 case 2:
1654 return 40;
1655 case 3:
1656 return 42;
1657 case 4:
1658 return 44;
1659 case 5:
1660 default:
1661 return 48;
1662 }
1663}
1664
1665static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1666{
Robin Murphy490325e2019-08-15 19:37:26 +01001667 unsigned int size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001668 u32 id;
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001669 bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001670 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001671
1672 dev_notice(smmu->dev, "probing hardware configuration...\n");
Robin Murphyb7862e32016-04-13 18:13:03 +01001673 dev_notice(smmu->dev, "SMMUv%d with:\n",
1674 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001675
1676 /* ID0 */
Robin Murphy00320ce2019-08-15 19:37:31 +01001677 id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01001678
1679 /* Restrict available stages based on module parameter */
1680 if (force_stage == 1)
1681 id &= ~(ID0_S2TS | ID0_NTS);
1682 else if (force_stage == 2)
1683 id &= ~(ID0_S1TS | ID0_NTS);
1684
Will Deacon45ae7cf2013-06-24 18:31:25 +01001685 if (id & ID0_S1TS) {
1686 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
1687 dev_notice(smmu->dev, "\tstage 1 translation\n");
1688 }
1689
1690 if (id & ID0_S2TS) {
1691 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
1692 dev_notice(smmu->dev, "\tstage 2 translation\n");
1693 }
1694
1695 if (id & ID0_NTS) {
1696 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
1697 dev_notice(smmu->dev, "\tnested translation\n");
1698 }
1699
1700 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01001701 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001702 dev_err(smmu->dev, "\tno translation support!\n");
1703 return -ENODEV;
1704 }
1705
Robin Murphyb7862e32016-04-13 18:13:03 +01001706 if ((id & ID0_S1TS) &&
1707 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001708 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
1709 dev_notice(smmu->dev, "\taddress translation ops\n");
1710 }
1711
Robin Murphybae2c2d2015-07-29 19:46:05 +01001712 /*
1713 * In order for DMA API calls to work properly, we must defer to what
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001714 * the FW says about coherency, regardless of what the hardware claims.
Robin Murphybae2c2d2015-07-29 19:46:05 +01001715 * Fortunately, this also opens up a workaround for systems where the
1716 * ID register value has ended up configured incorrectly.
1717 */
Robin Murphybae2c2d2015-07-29 19:46:05 +01001718 cttw_reg = !!(id & ID0_CTTW);
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001719 if (cttw_fw || cttw_reg)
Robin Murphybae2c2d2015-07-29 19:46:05 +01001720 dev_notice(smmu->dev, "\t%scoherent table walk\n",
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001721 cttw_fw ? "" : "non-");
1722 if (cttw_fw != cttw_reg)
Robin Murphybae2c2d2015-07-29 19:46:05 +01001723 dev_notice(smmu->dev,
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001724 "\t(IDR0.CTTW overridden by FW configuration)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01001725
Robin Murphy21174242016-09-12 17:13:48 +01001726 /* Max. number of entries we have for stream matching/indexing */
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001727 if (smmu->version == ARM_SMMU_V2 && id & ID0_EXIDS) {
1728 smmu->features |= ARM_SMMU_FEAT_EXIDS;
1729 size = 1 << 16;
1730 } else {
Robin Murphy0caf5f42019-08-15 19:37:23 +01001731 size = 1 << FIELD_GET(ID0_NUMSIDB, id);
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001732 }
Robin Murphy21174242016-09-12 17:13:48 +01001733 smmu->streamid_mask = size - 1;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001734 if (id & ID0_SMS) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001735 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
Robin Murphy0caf5f42019-08-15 19:37:23 +01001736 size = FIELD_GET(ID0_NUMSMRG, id);
Robin Murphy21174242016-09-12 17:13:48 +01001737 if (size == 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001738 dev_err(smmu->dev,
1739 "stream-matching supported, but no SMRs present!\n");
1740 return -ENODEV;
1741 }
1742
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001743 /* Zero-initialised to mark as invalid */
1744 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
1745 GFP_KERNEL);
1746 if (!smmu->smrs)
1747 return -ENOMEM;
1748
Will Deacon45ae7cf2013-06-24 18:31:25 +01001749 dev_notice(smmu->dev,
Robin Murphy490325e2019-08-15 19:37:26 +01001750			   "\tstream matching with %u register groups\n", size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001751 }
Robin Murphy8e8b2032016-09-12 17:13:50 +01001752 /* s2cr->type == 0 means translation, so initialise explicitly */
1753 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
1754 GFP_KERNEL);
1755 if (!smmu->s2crs)
1756 return -ENOMEM;
1757 for (i = 0; i < size; i++)
1758 smmu->s2crs[i] = s2cr_init_val;
1759
Robin Murphy21174242016-09-12 17:13:48 +01001760 smmu->num_mapping_groups = size;
Robin Murphy588888a2016-09-12 17:13:54 +01001761 mutex_init(&smmu->stream_map_mutex);
Will Deacon8e517e72017-07-06 15:55:48 +01001762 spin_lock_init(&smmu->global_sync_lock);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001763
Robin Murphy7602b872016-04-28 17:12:09 +01001764 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
1765 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
1766 if (!(id & ID0_PTFS_NO_AARCH32S))
1767 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
1768 }
1769
Will Deacon45ae7cf2013-06-24 18:31:25 +01001770 /* ID1 */
Robin Murphy00320ce2019-08-15 19:37:31 +01001771 id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01001772 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001773
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01001774 /* Check for size mismatch of SMMU address space from mapped region */
Robin Murphy0caf5f42019-08-15 19:37:23 +01001775 size = 1 << (FIELD_GET(ID1_NUMPAGENDXB, id) + 1);
Robin Murphy490325e2019-08-15 19:37:26 +01001776 if (smmu->numpage != 2 * size << smmu->pgshift)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001777 dev_warn(smmu->dev,
Robin Murphy490325e2019-08-15 19:37:26 +01001778 "SMMU address space size (0x%x) differs from mapped region size (0x%x)!\n",
1779 2 * size << smmu->pgshift, smmu->numpage);
1780 /* Now properly encode NUMPAGE to subsequently derive SMMU_CB_BASE */
1781 smmu->numpage = size;
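	/*
	 * From here on, the first numpage pages are the global register
	 * space and the second numpage pages hold the context banks, i.e.
	 * context bank n is accessed at page (numpage + n).
	 */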
Will Deacon45ae7cf2013-06-24 18:31:25 +01001782
Robin Murphy0caf5f42019-08-15 19:37:23 +01001783 smmu->num_s2_context_banks = FIELD_GET(ID1_NUMS2CB, id);
1784 smmu->num_context_banks = FIELD_GET(ID1_NUMCB, id);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001785 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
1786 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
1787 return -ENODEV;
1788 }
1789 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
1790 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphy90df3732017-08-08 14:56:14 +01001791 smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks,
1792 sizeof(*smmu->cbs), GFP_KERNEL);
1793 if (!smmu->cbs)
1794 return -ENOMEM;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001795
1796 /* ID2 */
Robin Murphy00320ce2019-08-15 19:37:31 +01001797 id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID2);
Robin Murphy0caf5f42019-08-15 19:37:23 +01001798 size = arm_smmu_id_size_to_bits(FIELD_GET(ID2_IAS, id));
Will Deacon518f7132014-11-14 17:17:54 +00001799 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001800
Will Deacon518f7132014-11-14 17:17:54 +00001801 /* The output mask is also applied for bypass */
Robin Murphy0caf5f42019-08-15 19:37:23 +01001802 size = arm_smmu_id_size_to_bits(FIELD_GET(ID2_OAS, id));
Will Deacon518f7132014-11-14 17:17:54 +00001803 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001804
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001805 if (id & ID2_VMID16)
1806 smmu->features |= ARM_SMMU_FEAT_VMID16;
1807
Robin Murphyf1d84542015-03-04 16:41:05 +00001808 /*
1809 * What the page table walker can address actually depends on which
1810 * descriptor format is in use, but since a) we don't know that yet,
1811 * and b) it can vary per context bank, this will have to do...
1812 */
1813 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
1814 dev_warn(smmu->dev,
1815 "failed to set DMA mask for table walker\n");
1816
Robin Murphyb7862e32016-04-13 18:13:03 +01001817 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00001818 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01001819 if (smmu->version == ARM_SMMU_V1_64K)
1820 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001821 } else {
Robin Murphy0caf5f42019-08-15 19:37:23 +01001822 size = FIELD_GET(ID2_UBS, id);
Will Deacon518f7132014-11-14 17:17:54 +00001823 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00001824 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01001825 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00001826 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01001827 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00001828 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01001829 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001830 }
1831
Robin Murphy7602b872016-04-28 17:12:09 +01001832 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01001833 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01001834 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01001835 if (smmu->features &
1836 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01001837 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01001838 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01001839 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01001840 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01001841 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01001842
Robin Murphyd5466352016-05-09 17:20:09 +01001843 if (arm_smmu_ops.pgsize_bitmap == -1UL)
1844 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
1845 else
1846 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
1847 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
1848 smmu->pgsize_bitmap);
1849
Will Deacon518f7132014-11-14 17:17:54 +00001850
Will Deacon28d60072014-09-01 16:24:48 +01001851 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
1852 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
Will Deacon518f7132014-11-14 17:17:54 +00001853 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01001854
1855 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
1856 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
Will Deacon518f7132014-11-14 17:17:54 +00001857 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01001858
Robin Murphy3995e182019-08-15 19:37:35 +01001859 if (smmu->impl && smmu->impl->cfg_probe)
1860 return smmu->impl->cfg_probe(smmu);
1861
Will Deacon45ae7cf2013-06-24 18:31:25 +01001862 return 0;
1863}
1864
Robin Murphy67b65a32016-04-13 18:12:57 +01001865struct arm_smmu_match_data {
1866 enum arm_smmu_arch_version version;
1867 enum arm_smmu_implementation model;
1868};
1869
1870#define ARM_SMMU_MATCH_DATA(name, ver, imp) \
Sricharan R96a299d2018-12-04 11:52:09 +05301871static const struct arm_smmu_match_data name = { .version = ver, .model = imp }
Robin Murphy67b65a32016-04-13 18:12:57 +01001872
1873ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
1874ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
Robin Murphyb7862e32016-04-13 18:13:03 +01001875ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01001876ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
Robin Murphye086d912016-04-13 18:12:58 +01001877ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
Vivek Gautam89cddc52018-12-04 11:52:13 +05301878ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2);
Robin Murphy67b65a32016-04-13 18:12:57 +01001879
Joerg Roedel09b52692014-10-02 12:24:45 +02001880static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01001881 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
1882 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
1883 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01001884 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01001885 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01001886 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Vivek Gautam89cddc52018-12-04 11:52:13 +05301887 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
Robin Murphy09360402014-08-28 17:51:59 +01001888 { },
1889};
Robin Murphy09360402014-08-28 17:51:59 +01001890
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00001891#ifdef CONFIG_ACPI
1892static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
1893{
1894 int ret = 0;
1895
1896 switch (model) {
1897 case ACPI_IORT_SMMU_V1:
1898 case ACPI_IORT_SMMU_CORELINK_MMU400:
1899 smmu->version = ARM_SMMU_V1;
1900 smmu->model = GENERIC_SMMU;
1901 break;
Robin Murphy84c24372017-06-19 16:41:56 +01001902 case ACPI_IORT_SMMU_CORELINK_MMU401:
1903 smmu->version = ARM_SMMU_V1_64K;
1904 smmu->model = GENERIC_SMMU;
1905 break;
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00001906 case ACPI_IORT_SMMU_V2:
1907 smmu->version = ARM_SMMU_V2;
1908 smmu->model = GENERIC_SMMU;
1909 break;
1910 case ACPI_IORT_SMMU_CORELINK_MMU500:
1911 smmu->version = ARM_SMMU_V2;
1912 smmu->model = ARM_MMU500;
1913 break;
Robin Murphy84c24372017-06-19 16:41:56 +01001914 case ACPI_IORT_SMMU_CAVIUM_THUNDERX:
1915 smmu->version = ARM_SMMU_V2;
1916 smmu->model = CAVIUM_SMMUV2;
1917 break;
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00001918 default:
1919 ret = -ENODEV;
1920 }
1921
1922 return ret;
1923}
1924
1925static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
1926 struct arm_smmu_device *smmu)
1927{
1928 struct device *dev = smmu->dev;
1929 struct acpi_iort_node *node =
1930 *(struct acpi_iort_node **)dev_get_platdata(dev);
1931 struct acpi_iort_smmu *iort_smmu;
1932 int ret;
1933
1934 /* Retrieve SMMU1/2 specific data */
1935 iort_smmu = (struct acpi_iort_smmu *)node->node_data;
1936
1937 ret = acpi_smmu_get_data(iort_smmu->model, smmu);
1938 if (ret < 0)
1939 return ret;
1940
1941 /* Ignore the configuration access interrupt */
1942 smmu->num_global_irqs = 1;
1943
1944 if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
1945 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
1946
1947 return 0;
1948}
1949#else
1950static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
1951 struct arm_smmu_device *smmu)
1952{
1953 return -ENODEV;
1954}
1955#endif
1956
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001957static int arm_smmu_device_dt_probe(struct platform_device *pdev,
1958 struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001959{
Robin Murphy67b65a32016-04-13 18:12:57 +01001960 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001961 struct device *dev = &pdev->dev;
Robin Murphy021bb842016-09-14 15:26:46 +01001962 bool legacy_binding;
1963
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001964 if (of_property_read_u32(dev->of_node, "#global-interrupts",
1965 &smmu->num_global_irqs)) {
1966 dev_err(dev, "missing #global-interrupts property\n");
1967 return -ENODEV;
1968 }
1969
1970 data = of_device_get_match_data(dev);
1971 smmu->version = data->version;
1972 smmu->model = data->model;
1973
Robin Murphy021bb842016-09-14 15:26:46 +01001974 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
1975 if (legacy_binding && !using_generic_binding) {
1976 if (!using_legacy_binding)
1977 pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
1978 using_legacy_binding = true;
1979 } else if (!legacy_binding && !using_legacy_binding) {
1980 using_generic_binding = true;
1981 } else {
1982 dev_err(dev, "not probing due to mismatched DT properties\n");
1983 return -ENODEV;
1984 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001985
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001986 if (of_dma_is_coherent(dev->of_node))
1987 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
1988
1989 return 0;
1990}
1991
Robin Murphyf6810c12017-04-10 16:51:05 +05301992static void arm_smmu_bus_init(void)
1993{
1994 /* Oh, for a proper bus abstraction */
1995 if (!iommu_present(&platform_bus_type))
1996 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
1997#ifdef CONFIG_ARM_AMBA
1998 if (!iommu_present(&amba_bustype))
1999 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
2000#endif
2001#ifdef CONFIG_PCI
2002 if (!iommu_present(&pci_bus_type)) {
2003 pci_request_acs();
2004 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
2005 }
2006#endif
Nipun Guptaeab03e22018-09-10 19:19:18 +05302007#ifdef CONFIG_FSL_MC_BUS
2008 if (!iommu_present(&fsl_mc_bus_type))
2009 bus_set_iommu(&fsl_mc_bus_type, &arm_smmu_ops);
2010#endif
Robin Murphyf6810c12017-04-10 16:51:05 +05302011}
2012
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002013static int arm_smmu_device_probe(struct platform_device *pdev)
2014{
2015 struct resource *res;
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002016 resource_size_t ioaddr;
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002017 struct arm_smmu_device *smmu;
2018 struct device *dev = &pdev->dev;
2019 int num_irqs, i, err;
2020
Will Deacon45ae7cf2013-06-24 18:31:25 +01002021 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
2022 if (!smmu) {
2023 dev_err(dev, "failed to allocate arm_smmu_device\n");
2024 return -ENOMEM;
2025 }
2026 smmu->dev = dev;
2027
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002028 if (dev->of_node)
2029 err = arm_smmu_device_dt_probe(pdev, smmu);
2030 else
2031 err = arm_smmu_device_acpi_probe(pdev, smmu);
2032
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002033 if (err)
2034 return err;
Robin Murphy09360402014-08-28 17:51:59 +01002035
Robin Murphyfc058d32019-08-15 19:37:33 +01002036 smmu = arm_smmu_impl_init(smmu);
2037 if (IS_ERR(smmu))
2038 return PTR_ERR(smmu);
2039
Will Deacon45ae7cf2013-06-24 18:31:25 +01002040 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002041 ioaddr = res->start;
Julia Lawall8a7f4312013-08-19 12:20:37 +01002042 smmu->base = devm_ioremap_resource(dev, res);
2043 if (IS_ERR(smmu->base))
2044 return PTR_ERR(smmu->base);
Robin Murphy490325e2019-08-15 19:37:26 +01002045 /*
2046 * The resource size should effectively match the value of SMMU_TOP;
2047 * stash that temporarily until we know PAGESIZE to validate it with.
2048 */
2049 smmu->numpage = resource_size(res);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002050
Will Deacon45ae7cf2013-06-24 18:31:25 +01002051 num_irqs = 0;
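	/*
	 * Count the IRQ resources: the first num_global_irqs are global
	 * (fault/config) interrupts, anything beyond that is treated as a
	 * context bank interrupt.
	 */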
2052 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
2053 num_irqs++;
2054 if (num_irqs > smmu->num_global_irqs)
2055 smmu->num_context_irqs++;
2056 }
2057
Andreas Herrmann44a08de2013-10-01 13:39:07 +01002058 if (!smmu->num_context_irqs) {
2059 dev_err(dev, "found %d interrupts but expected at least %d\n",
2060 num_irqs, smmu->num_global_irqs + 1);
2061 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002062 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002063
Kees Cooka86854d2018-06-12 14:07:58 -07002064 smmu->irqs = devm_kcalloc(dev, num_irqs, sizeof(*smmu->irqs),
Will Deacon45ae7cf2013-06-24 18:31:25 +01002065 GFP_KERNEL);
2066 if (!smmu->irqs) {
2067 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
2068 return -ENOMEM;
2069 }
2070
2071 for (i = 0; i < num_irqs; ++i) {
2072 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07002073
Will Deacon45ae7cf2013-06-24 18:31:25 +01002074 if (irq < 0) {
2075 dev_err(dev, "failed to get irq index %d\n", i);
2076 return -ENODEV;
2077 }
2078 smmu->irqs[i] = irq;
2079 }
2080
Sricharan R96a299d2018-12-04 11:52:09 +05302081 err = devm_clk_bulk_get_all(dev, &smmu->clks);
2082 if (err < 0) {
2083 dev_err(dev, "failed to get clocks %d\n", err);
2084 return err;
2085 }
2086 smmu->num_clks = err;
2087
2088 err = clk_bulk_prepare_enable(smmu->num_clks, smmu->clks);
2089 if (err)
2090 return err;
2091
Olav Haugan3c8766d2014-08-22 17:12:32 -07002092 err = arm_smmu_device_cfg_probe(smmu);
2093 if (err)
2094 return err;
2095
Vivek Gautamd1e20222018-07-19 23:23:56 +05302096 if (smmu->version == ARM_SMMU_V2) {
2097 if (smmu->num_context_banks > smmu->num_context_irqs) {
2098 dev_err(dev,
2099 "found only %d context irq(s) but %d required\n",
2100 smmu->num_context_irqs, smmu->num_context_banks);
2101 return -ENODEV;
2102 }
2103
2104 /* Ignore superfluous interrupts */
2105 smmu->num_context_irqs = smmu->num_context_banks;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002106 }
2107
Will Deacon45ae7cf2013-06-24 18:31:25 +01002108 for (i = 0; i < smmu->num_global_irqs; ++i) {
Peng Fanbee14002016-07-04 17:38:22 +08002109 err = devm_request_irq(smmu->dev, smmu->irqs[i],
2110 arm_smmu_global_fault,
2111 IRQF_SHARED,
2112 "arm-smmu global fault",
2113 smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002114 if (err) {
2115 dev_err(dev, "failed to request global IRQ %d (%u)\n",
2116 i, smmu->irqs[i]);
Robin Murphyf80cd882016-09-14 15:21:39 +01002117 return err;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002118 }
2119 }
2120
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002121 err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
2122 "smmu.%pa", &ioaddr);
2123 if (err) {
2124 dev_err(dev, "Failed to register iommu in sysfs\n");
2125 return err;
2126 }
2127
2128 iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
2129 iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
2130
2131 err = iommu_device_register(&smmu->iommu);
2132 if (err) {
2133 dev_err(dev, "Failed to register iommu\n");
2134 return err;
2135 }
2136
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002137 platform_set_drvdata(pdev, smmu);
Will Deaconfd90cec2013-08-21 13:56:34 +01002138 arm_smmu_device_reset(smmu);
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03002139 arm_smmu_test_smr_masks(smmu);
Robin Murphy021bb842016-09-14 15:26:46 +01002140
Robin Murphyf6810c12017-04-10 16:51:05 +05302141 /*
Sricharan Rd4a44f02018-12-04 11:52:10 +05302142 * We want to avoid touching dev->power.lock in fastpaths unless
2143 * it's really going to do something useful - pm_runtime_enabled()
2144 * can serve as an ideal proxy for that decision. So, conditionally
2145 * enable pm_runtime.
2146 */
2147 if (dev->pm_domain) {
2148 pm_runtime_set_active(dev);
2149 pm_runtime_enable(dev);
2150 }
2151
2152 /*
Robin Murphyf6810c12017-04-10 16:51:05 +05302153 * For ACPI and generic DT bindings, an SMMU will be probed before
2154 * any device which might need it, so we want the bus ops in place
2155 * ready to handle default domain setup as soon as any SMMU exists.
2156 */
2157 if (!using_legacy_binding)
2158 arm_smmu_bus_init();
2159
Will Deacon45ae7cf2013-06-24 18:31:25 +01002160 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002161}
2162
Robin Murphyf6810c12017-04-10 16:51:05 +05302163/*
2164 * With the legacy DT binding in play, though, we have no guarantees about
2165 * probe order, but then we're also not doing default domains, so we can
2166 * delay setting bus ops until we're sure every possible SMMU is ready,
2167 * and that way ensure that no add_device() calls get missed.
2168 */
2169static int arm_smmu_legacy_bus_init(void)
2170{
2171 if (using_legacy_binding)
2172 arm_smmu_bus_init();
2173 return 0;
2174}
2175device_initcall_sync(arm_smmu_legacy_bus_init);
2176
Paul Gortmakeraddb672f2018-12-01 14:19:16 -05002177static void arm_smmu_device_shutdown(struct platform_device *pdev)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002178{
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002179 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002180
2181 if (!smmu)
Paul Gortmakeraddb672f2018-12-01 14:19:16 -05002182 return;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002183
Will Deaconecfadb62013-07-31 19:21:28 +01002184 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002185 dev_err(&pdev->dev, "removing device with active domains!\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01002186
Sricharan Rd4a44f02018-12-04 11:52:10 +05302187 arm_smmu_rpm_get(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002188 /* Turn the thing off */
Robin Murphy00320ce2019-08-15 19:37:31 +01002189 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, sCR0_CLIENTPD);
Sricharan Rd4a44f02018-12-04 11:52:10 +05302190 arm_smmu_rpm_put(smmu);
Sricharan R96a299d2018-12-04 11:52:09 +05302191
Sricharan Rd4a44f02018-12-04 11:52:10 +05302192 if (pm_runtime_enabled(smmu->dev))
2193 pm_runtime_force_suspend(smmu->dev);
2194 else
2195 clk_bulk_disable(smmu->num_clks, smmu->clks);
2196
2197 clk_bulk_unprepare(smmu->num_clks, smmu->clks);
Nate Watterson7aa86192017-06-29 18:18:15 -04002198}
2199
Sricharan R96a299d2018-12-04 11:52:09 +05302200static int __maybe_unused arm_smmu_runtime_resume(struct device *dev)
Robin Murphya2d866f2017-08-08 14:56:15 +01002201{
2202 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
Sricharan R96a299d2018-12-04 11:52:09 +05302203 int ret;
2204
2205 ret = clk_bulk_enable(smmu->num_clks, smmu->clks);
2206 if (ret)
2207 return ret;
Robin Murphya2d866f2017-08-08 14:56:15 +01002208
2209 arm_smmu_device_reset(smmu);
Sricharan R96a299d2018-12-04 11:52:09 +05302210
Will Deacon45ae7cf2013-06-24 18:31:25 +01002211 return 0;
2212}
2213
Sricharan R96a299d2018-12-04 11:52:09 +05302214static int __maybe_unused arm_smmu_runtime_suspend(struct device *dev)
Dan Carpenter6614ee72013-08-21 09:34:20 +01002215{
Sricharan R96a299d2018-12-04 11:52:09 +05302216 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
2217
2218 clk_bulk_disable(smmu->num_clks, smmu->clks);
2219
2220 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002221}
2222
Robin Murphya2d866f2017-08-08 14:56:15 +01002223static int __maybe_unused arm_smmu_pm_resume(struct device *dev)
2224{
Sricharan R96a299d2018-12-04 11:52:09 +05302225 if (pm_runtime_suspended(dev))
2226 return 0;
Robin Murphya2d866f2017-08-08 14:56:15 +01002227
Sricharan R96a299d2018-12-04 11:52:09 +05302228 return arm_smmu_runtime_resume(dev);
Robin Murphya2d866f2017-08-08 14:56:15 +01002229}
2230
Sricharan R96a299d2018-12-04 11:52:09 +05302231static int __maybe_unused arm_smmu_pm_suspend(struct device *dev)
2232{
2233 if (pm_runtime_suspended(dev))
2234 return 0;
2235
2236 return arm_smmu_runtime_suspend(dev);
2237}
2238
2239static const struct dev_pm_ops arm_smmu_pm_ops = {
2240 SET_SYSTEM_SLEEP_PM_OPS(arm_smmu_pm_suspend, arm_smmu_pm_resume)
2241 SET_RUNTIME_PM_OPS(arm_smmu_runtime_suspend,
2242 arm_smmu_runtime_resume, NULL)
2243};
Robin Murphya2d866f2017-08-08 14:56:15 +01002244
Will Deacon45ae7cf2013-06-24 18:31:25 +01002245static struct platform_driver arm_smmu_driver = {
2246 .driver = {
Paul Gortmakeraddb672f2018-12-01 14:19:16 -05002247 .name = "arm-smmu",
2248 .of_match_table = of_match_ptr(arm_smmu_of_match),
2249 .pm = &arm_smmu_pm_ops,
2250 .suppress_bind_attrs = true,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002251 },
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002252 .probe = arm_smmu_device_probe,
Nate Watterson7aa86192017-06-29 18:18:15 -04002253 .shutdown = arm_smmu_device_shutdown,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002254};
Paul Gortmakeraddb672f2018-12-01 14:19:16 -05002255builtin_platform_driver(arm_smmu_driver);