// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 *	- Extended Stream ID (16 bit)
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/io-pgtable.h>
#include <linux/iopoll.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include <linux/amba/bus.h>
#include <linux/fsl/mc.h>

#include "arm-smmu.h"

/*
 * Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU
 * global register space are still, in fact, using a hypervisor to mediate it
 * by trapping and emulating register accesses. Sadly, some deployed versions
 * of said trapping code have bugs wherein they go horribly wrong for stores
 * using r31 (i.e. XZR/WZR) as the source register.
 */
#define QCOM_DUMMY_VAL -1

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)
#define ARM_MMU500_ACR_S2CRB_TLBEN	(1 << 10)
#define ARM_MMU500_ACR_SMTNMB_TLBEN	(1 << 8)

#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */
#define TLB_SPIN_COUNT			10

#define MSI_IOVA_BASE			0x8000000
#define MSI_IOVA_LENGTH			0x100000

static int force_stage;
/*
 * not really modular, but the easiest way to keep compat with existing
 * bootargs behaviour is to continue using module_param() here.
 */
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass =
	IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT);
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

struct arm_smmu_s2cr {
	struct iommu_group		*group;
	int				count;
	enum arm_smmu_s2cr_type		type;
	enum arm_smmu_s2cr_privcfg	privcfg;
	u8				cbndx;
};

#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
}

struct arm_smmu_smr {
	u16				mask;
	u16				id;
	bool				valid;
};

struct arm_smmu_cb {
	u64				ttbr[2];
	u32				tcr[2];
	u32				mair[2];
	struct arm_smmu_cfg		*cfg;
};

struct arm_smmu_master_cfg {
	struct arm_smmu_device		*smmu;
	s16				smendx[];
};
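/*
 * Per-master data is stashed in dev->iommu_fwspec->iommu_priv; the helpers
 * below recover it and walk the stream map entry indices (smendx) it holds.
 */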
#define INVALID_SMENDX			-1
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
#define fwspec_smendx(fw, i) \
	(i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
#define for_each_cfg_sme(fw, i, idx) \
	for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	union {
		u16			asid;
		u16			vmid;
	};
	enum arm_smmu_cbar_type		cbar;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
	ARM_SMMU_DOMAIN_BYPASS,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	const struct iommu_gather_ops	*tlb_ops;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	bool				non_strict;
	struct mutex			init_mutex; /* Protects smmu pointer */
	spinlock_t			cb_lock; /* Serialises ATS1* ops and TLB syncs */
	struct iommu_domain		domain;
};

static bool using_legacy_binding, using_generic_binding;

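/*
 * Runtime PM wrappers: they only touch the device when runtime PM has been
 * enabled for it, so they are safe no-ops otherwise.
 */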
static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
{
	if (pm_runtime_enabled(smmu->dev))
		return pm_runtime_get_sync(smmu->dev);

	return 0;
}

static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
{
	if (pm_runtime_enabled(smmu->dev))
		pm_runtime_put(smmu->dev);
}

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}

static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", 0)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}

static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;

static int arm_smmu_register_legacy_master(struct device *dev,
					   struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err;

	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}

static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
				int sync, int status)
{
	unsigned int spin_cnt, delay;
	u32 reg;

	arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL);
	for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
		for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
			reg = arm_smmu_readl(smmu, page, status);
			if (!(reg & sTLBGSTATUS_GSACTIVE))
				return;
			cpu_relax();
		}
		udelay(delay);
	}
	dev_err_ratelimited(smmu->dev,
			    "TLB sync timed out -- SMMU may be deadlocked\n");
}

static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
{
	unsigned long flags;

	spin_lock_irqsave(&smmu->global_sync_lock, flags);
	__arm_smmu_tlb_sync(smmu, ARM_SMMU_GR0, ARM_SMMU_GR0_sTLBGSYNC,
			    ARM_SMMU_GR0_sTLBGSTATUS);
	spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
}

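/* Sync TLB maintenance on a single context bank, serialised by cb_lock */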
static void arm_smmu_tlb_sync_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	unsigned long flags;

	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
	__arm_smmu_tlb_sync(smmu, ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx),
			    ARM_SMMU_CB_TLBSYNC, ARM_SMMU_CB_TLBSTATUS);
	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
}

static void arm_smmu_tlb_sync_vmid(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;

	arm_smmu_tlb_sync_global(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context_s1(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	/*
	 * The TLBI write may be relaxed, so ensure that PTEs cleared by the
	 * current CPU are visible beforehand.
	 */
	wmb();
	arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx,
			  ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid);
	arm_smmu_tlb_sync_context(cookie);
}

static void arm_smmu_tlb_inv_context_s2(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	/* See above */
	wmb();
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
	arm_smmu_tlb_sync_global(smmu);
}

static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size,
				      size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	int reg, idx = cfg->cbndx;

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	reg = leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

	if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
		iova = (iova >> 12) << 12;
		iova |= cfg->asid;
		do {
			arm_smmu_cb_write(smmu, idx, reg, iova);
			iova += granule;
		} while (size -= granule);
	} else {
		iova >>= 12;
		iova |= (u64)cfg->asid << 48;
		do {
			arm_smmu_cb_writeq(smmu, idx, reg, iova);
			iova += granule >> 12;
		} while (size -= granule);
	}
}

static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
				      size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	int reg, idx = smmu_domain->cfg.cbndx;

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	reg = leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L : ARM_SMMU_CB_S2_TLBIIPAS2;
	iova >>= 12;
	do {
		if (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64)
			arm_smmu_cb_writeq(smmu, idx, reg, iova);
		else
			arm_smmu_cb_write(smmu, idx, reg, iova);
		iova += granule >> 12;
	} while (size -= granule);
}

/*
 * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
 * almost negligible, but the benefit of getting the first one in as far ahead
 * of the sync as possible is significant, hence we don't just make this a
 * no-op and set .tlb_sync to arm_smmu_inv_context_s2() as you might think.
 */
static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
					 size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
}

static const struct iommu_gather_ops arm_smmu_s1_tlb_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s1,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_s1,
	.tlb_sync	= arm_smmu_tlb_sync_context,
};

static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v2 = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_s2,
	.tlb_sync	= arm_smmu_tlb_sync_context,
};

static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v1 = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
	.tlb_add_flush	= arm_smmu_tlb_inv_vmid_nosync,
	.tlb_sync	= arm_smmu_tlb_sync_vmid,
};

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	u32 fsr, fsynr, cbfrsynra;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	int idx = smmu_domain->cfg.cbndx;

	fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0);
	iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);
	cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));

	dev_err_ratelimited(smmu->dev,
	"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
			    fsr, iova, fsynr, cbfrsynra, idx);

	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;

	gfsr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
	gfsynr0 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, gfsr);
	return IRQ_HANDLED;
}

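/*
 * Capture the page table configuration into the software shadow of the
 * context bank; arm_smmu_write_context_bank() pushes it to the hardware.
 */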
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	cb->cfg = cfg;

	/* TCR */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr;
		} else {
			cb->tcr[0] = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
			cb->tcr[1] = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			cb->tcr[1] |= FIELD_PREP(TCR2_SEP, TCR2_SEP_UPSTREAM);
			if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
				cb->tcr[1] |= TCR2_AS;
		}
	} else {
		cb->tcr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	}

	/* TTBRs */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
			cb->ttbr[1] = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
		} else {
			cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
			cb->ttbr[0] |= FIELD_PREP(TTBRn_ASID, cfg->asid);
			cb->ttbr[1] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
			cb->ttbr[1] |= FIELD_PREP(TTBRn_ASID, cfg->asid);
		}
	} else {
		cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr;
			cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr;
		} else {
			cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
			cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		}
	}
}

static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
{
	u32 reg;
	bool stage1;
	struct arm_smmu_cb *cb = &smmu->cbs[idx];
	struct arm_smmu_cfg *cfg = cb->cfg;

	/* Unassigned context banks only need disabling */
	if (!cfg) {
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, 0);
		return;
	}

	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	/* CBA2R */
	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_VA64;
		else
			reg = 0;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= FIELD_PREP(CBA2R_VMID16, cfg->vmid);

		arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBA2R(idx), reg);
	}

	/* CBAR */
	reg = FIELD_PREP(CBAR_TYPE, cfg->cbar);
	if (smmu->version < ARM_SMMU_V2)
		reg |= FIELD_PREP(CBAR_IRPTNDX, cfg->irptndx);

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= FIELD_PREP(CBAR_S1_BPSHCFG, CBAR_S1_BPSHCFG_NSH) |
			FIELD_PREP(CBAR_S1_MEMATTR, CBAR_S1_MEMATTR_WB);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= FIELD_PREP(CBAR_VMID, cfg->vmid);
	}
	arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(idx), reg);

	/*
	 * TCR
	 * We must write this before the TTBRs, since it determines the
	 * access behaviour of some fields (in particular, ASID[15:8]).
	 */
	if (stage1 && smmu->version > ARM_SMMU_V1)
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR2, cb->tcr[1]);
	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR, cb->tcr[0]);

	/* TTBRs */
	if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_CONTEXTIDR, cfg->asid);
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR1, cb->ttbr[1]);
	} else {
		arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
		if (stage1)
			arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR1,
					   cb->ttbr[1]);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR0, cb->mair[0]);
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR1, cb->mair[1]);
	}

	/* SCTLR */
	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= SCTLR_E;

	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
}

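/*
 * Finalise a domain against a specific SMMU instance: pick a translation
 * stage and context format, claim a context bank, allocate the page tables
 * and wire up the context fault IRQ.
 */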
static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
		smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N              S1
	 *     S1             S1+S2            S1
	 *     S1               S2             S2
	 *     S1               S1             S1
	 *     N                N              N
	 *     N              S1+S2            S2
	 *     N                S2             S2
	 *     N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
	    !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
	    (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		} else {
			fmt = ARM_V7S;
			ias = min(ias, 32UL);
			oas = min(oas, 32UL);
		}
		smmu_domain->tlb_ops = &arm_smmu_s1_tlb_ops;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		if (smmu->version == ARM_SMMU_V2)
			smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v2;
		else
			smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v1;
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}
	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (ret < 0)
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
		cfg->vmid = cfg->cbndx + 1 + smmu->cavium_id_base;
	else
		cfg->asid = cfg->cbndx + smmu->cavium_id_base;

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.coherent_walk	= smmu->features & ARM_SMMU_FEAT_COHERENT_WALK,
		.tlb		= smmu_domain->tlb_ops,
		.iommu_dev	= smmu->dev,
	};

	if (smmu_domain->non_strict)
		pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1UL << ias) - 1;
	domain->geometry.force_aperture = true;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
			       IRQF_SHARED, "arm-smmu-context-fault", domain);
	if (ret < 0) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	int ret, irq;

	if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
		return;

	ret = arm_smmu_rpm_get(smmu);
	if (ret < 0)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	smmu->cbs[cfg->cbndx].cfg = NULL;
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);

	arm_smmu_rpm_put(smmu);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED &&
	    type != IOMMU_DOMAIN_DMA &&
	    type != IOMMU_DOMAIN_IDENTITY)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
	    iommu_get_dma_cookie(&smmu_domain->domain))) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->cb_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_smr *smr = smmu->smrs + idx;
	u32 reg = FIELD_PREP(SMR_ID, smr->id) | FIELD_PREP(SMR_MASK, smr->mask);

	if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
		reg |= SMR_VALID;
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(idx), reg);
}

static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
	u32 reg = FIELD_PREP(S2CR_TYPE, s2cr->type) |
		  FIELD_PREP(S2CR_CBNDX, s2cr->cbndx) |
		  FIELD_PREP(S2CR_PRIVCFG, s2cr->privcfg);

	if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
	    smmu->smrs[idx].valid)
		reg |= S2CR_EXIDVALID;
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
}

static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
{
	arm_smmu_write_s2cr(smmu, idx);
	if (smmu->smrs)
		arm_smmu_write_smr(smmu, idx);
}

/*
 * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
 * should be called after sCR0 is written.
 */
static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
{
	u32 smr;

	if (!smmu->smrs)
		return;

	/*
	 * SMR.ID bits may not be preserved if the corresponding MASK
	 * bits are set, so check each one separately. We can reject
	 * masters later if they try to claim IDs outside these masks.
	 */
	smr = FIELD_PREP(SMR_ID, smmu->streamid_mask);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(0), smr);
	smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(0));
	smmu->streamid_mask = FIELD_GET(SMR_ID, smr);

	smr = FIELD_PREP(SMR_MASK, smmu->streamid_mask);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(0), smr);
	smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(0));
	smmu->smr_mask_mask = FIELD_GET(SMR_MASK, smr);
}

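/*
 * Return a usable stream map entry index for the given ID/mask, -EINVAL if
 * it would conflict with an existing SMR, or -ENOSPC if the table is full.
 */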
static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
{
	struct arm_smmu_smr *smrs = smmu->smrs;
	int i, free_idx = -ENOSPC;

	/* Stream indexing is blissfully easy */
	if (!smrs)
		return id;

	/* Validating SMRs is... less so */
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		if (!smrs[i].valid) {
			/*
			 * Note the first free entry we come across, which
			 * we'll claim in the end if nothing else matches.
			 */
			if (free_idx < 0)
				free_idx = i;
			continue;
		}
		/*
		 * If the new entry is _entirely_ matched by an existing entry,
		 * then reuse that, with the guarantee that there also cannot
		 * be any subsequent conflicting entries. In normal use we'd
		 * expect simply identical entries for this case, but there's
		 * no harm in accommodating the generalisation.
		 */
		if ((mask & smrs[i].mask) == mask &&
		    !((id ^ smrs[i].id) & ~smrs[i].mask))
			return i;
		/*
		 * If the new entry has any other overlap with an existing one,
		 * though, then there always exists at least one stream ID
		 * which would cause a conflict, and we can't allow that risk.
		 */
		if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
			return -EINVAL;
	}

	return free_idx;
}

static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
{
	if (--smmu->s2crs[idx].count)
		return false;

	smmu->s2crs[idx] = s2cr_init_val;
	if (smmu->smrs)
		smmu->smrs[idx].valid = false;

	return true;
}

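/*
 * Reserve stream map entries for every stream ID in the master's fwspec,
 * then program the SMRs/S2CRs once the group lookup has succeeded.
 */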
static int arm_smmu_master_alloc_smes(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
	struct arm_smmu_device *smmu = cfg->smmu;
	struct arm_smmu_smr *smrs = smmu->smrs;
	struct iommu_group *group;
	int i, idx, ret;

	mutex_lock(&smmu->stream_map_mutex);
	/* Figure out a viable stream map entry allocation */
	for_each_cfg_sme(fwspec, i, idx) {
		u16 sid = FIELD_GET(SMR_ID, fwspec->ids[i]);
		u16 mask = FIELD_GET(SMR_MASK, fwspec->ids[i]);

		if (idx != INVALID_SMENDX) {
			ret = -EEXIST;
			goto out_err;
		}

		ret = arm_smmu_find_sme(smmu, sid, mask);
		if (ret < 0)
			goto out_err;

		idx = ret;
		if (smrs && smmu->s2crs[idx].count == 0) {
			smrs[idx].id = sid;
			smrs[idx].mask = mask;
			smrs[idx].valid = true;
		}
		smmu->s2crs[idx].count++;
		cfg->smendx[i] = (s16)idx;
	}

	group = iommu_group_get_for_dev(dev);
	if (!group)
		group = ERR_PTR(-ENOMEM);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto out_err;
	}
	iommu_group_put(group);

	/* It worked! Now, poke the actual hardware */
	for_each_cfg_sme(fwspec, i, idx) {
		arm_smmu_write_sme(smmu, idx);
		smmu->s2crs[idx].group = group;
	}

	mutex_unlock(&smmu->stream_map_mutex);
	return 0;

out_err:
	while (i--) {
		arm_smmu_free_sme(smmu, cfg->smendx[i]);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
	return ret;
}

static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
{
	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
	int i, idx;

	mutex_lock(&smmu->stream_map_mutex);
	for_each_cfg_sme(fwspec, i, idx) {
		if (arm_smmu_free_sme(smmu, idx))
			arm_smmu_write_sme(smmu, idx);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
}

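/*
 * Point each of the master's stream map entries at the domain's context
 * bank (or at bypass for an identity domain).
 */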
Will Deacon45ae7cf2013-06-24 18:31:25 +01001077static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Robin Murphyadfec2e2016-09-12 17:13:55 +01001078 struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001079{
Will Deacon44680ee2014-06-25 11:29:12 +01001080 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001081 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001082 u8 cbndx = smmu_domain->cfg.cbndx;
Will Deacon61bc6712017-01-06 16:56:03 +00001083 enum arm_smmu_s2cr_type type;
Robin Murphy588888a2016-09-12 17:13:54 +01001084 int i, idx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001085
Will Deacon61bc6712017-01-06 16:56:03 +00001086 if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
1087 type = S2CR_TYPE_BYPASS;
1088 else
1089 type = S2CR_TYPE_TRANS;
1090
Robin Murphyadfec2e2016-09-12 17:13:55 +01001091 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy8e8b2032016-09-12 17:13:50 +01001092 if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
Robin Murphy588888a2016-09-12 17:13:54 +01001093 continue;
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001094
Robin Murphy8e8b2032016-09-12 17:13:50 +01001095 s2cr[idx].type = type;
Sricharan Re1989802017-01-06 18:58:15 +05301096 s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001097 s2cr[idx].cbndx = cbndx;
1098 arm_smmu_write_s2cr(smmu, idx);
Will Deacon43b412b2014-07-15 11:22:24 +01001099 }
Robin Murphy8e8b2032016-09-12 17:13:50 +01001100 return 0;
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001101}
1102
Will Deacon45ae7cf2013-06-24 18:31:25 +01001103static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1104{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001105 int ret;
Joerg Roedel9b468f72018-11-29 14:01:00 +01001106 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001107 struct arm_smmu_device *smmu;
Joerg Roedel1d672632015-03-26 13:43:10 +01001108 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001109
Robin Murphyadfec2e2016-09-12 17:13:55 +01001110 if (!fwspec || fwspec->ops != &arm_smmu_ops) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001111 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
1112 return -ENXIO;
1113 }
1114
Robin Murphyfba4f8e2016-10-17 12:06:21 +01001115 /*
1116 * FIXME: The arch/arm DMA API code tries to attach devices to its own
1117 * domains between of_xlate() and add_device() - we have no way to cope
1118 * with that, so until ARM gets converted to rely on groups and default
1119 * domains, just say no (but more politely than by dereferencing NULL).
1120 * This should be at least a WARN_ON once that's sorted.
1121 */
1122 if (!fwspec->iommu_priv)
1123 return -ENODEV;
1124
Robin Murphyadfec2e2016-09-12 17:13:55 +01001125 smmu = fwspec_smmu(fwspec);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301126
1127 ret = arm_smmu_rpm_get(smmu);
1128 if (ret < 0)
1129 return ret;
1130
Will Deacon518f7132014-11-14 17:17:54 +00001131 /* Ensure that the domain is finalised */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001132 ret = arm_smmu_init_domain_context(domain, smmu);
Arnd Bergmann287980e2016-05-27 23:23:25 +02001133 if (ret < 0)
Sricharan Rd4a44f02018-12-04 11:52:10 +05301134 goto rpm_put;
Will Deacon518f7132014-11-14 17:17:54 +00001135
Will Deacon45ae7cf2013-06-24 18:31:25 +01001136 /*
Will Deacon44680ee2014-06-25 11:29:12 +01001137 * Sanity check the domain. We don't support domains across
1138 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01001139 */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001140 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001141 dev_err(dev,
1142 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Robin Murphyadfec2e2016-09-12 17:13:55 +01001143 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
Sricharan Rd4a44f02018-12-04 11:52:10 +05301144 ret = -EINVAL;
1145 goto rpm_put;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001146 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001147
1148 /* Looks ok, so add the device to the domain */
Sricharan Rd4a44f02018-12-04 11:52:10 +05301149 ret = arm_smmu_domain_add_master(smmu_domain, fwspec);
1150
1151rpm_put:
1152 arm_smmu_rpm_put(smmu);
1153 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001154}
1155
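/*
 * The map/unmap callbacks below take a runtime PM reference and defer to the
 * io-pgtable ops installed when the domain context was initialised. For
 * orientation only, a hypothetical external user would reach them through the
 * generic IOMMU API along these lines (illustrative sketch, not part of this
 * driver):
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&platform_bus_type);
 *
 *	iommu_attach_device(dom, dev);
 *	iommu_map(dom, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *	...
 *	iommu_unmap(dom, iova, SZ_4K);
 *	iommu_detach_device(dom, dev);
 *	iommu_domain_free(dom);
 */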
Will Deacon45ae7cf2013-06-24 18:31:25 +01001156static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00001157 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001158{
Robin Murphy523d7422017-06-22 16:53:56 +01001159 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301160 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1161 int ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001162
Will Deacon518f7132014-11-14 17:17:54 +00001163 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001164 return -ENODEV;
1165
Sricharan Rd4a44f02018-12-04 11:52:10 +05301166 arm_smmu_rpm_get(smmu);
1167 ret = ops->map(ops, iova, paddr, size, prot);
1168 arm_smmu_rpm_put(smmu);
1169
1170 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001171}
1172
1173static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
1174 size_t size)
1175{
Robin Murphy523d7422017-06-22 16:53:56 +01001176 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301177 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1178 size_t ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001179
Will Deacon518f7132014-11-14 17:17:54 +00001180 if (!ops)
1181 return 0;
1182
Sricharan Rd4a44f02018-12-04 11:52:10 +05301183 arm_smmu_rpm_get(smmu);
1184 ret = ops->unmap(ops, iova, size);
1185 arm_smmu_rpm_put(smmu);
1186
1187 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001188}
1189
Robin Murphy44f68762018-09-20 17:10:27 +01001190static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
1191{
1192 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301193 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy44f68762018-09-20 17:10:27 +01001194
Sricharan Rd4a44f02018-12-04 11:52:10 +05301195 if (smmu_domain->tlb_ops) {
1196 arm_smmu_rpm_get(smmu);
Robin Murphy44f68762018-09-20 17:10:27 +01001197 smmu_domain->tlb_ops->tlb_flush_all(smmu_domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301198 arm_smmu_rpm_put(smmu);
1199 }
Robin Murphy44f68762018-09-20 17:10:27 +01001200}
1201
Robin Murphy32b12442017-09-28 15:55:01 +01001202static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
1203{
1204 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301205 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy32b12442017-09-28 15:55:01 +01001206
Sricharan Rd4a44f02018-12-04 11:52:10 +05301207 if (smmu_domain->tlb_ops) {
1208 arm_smmu_rpm_get(smmu);
Robin Murphy32b12442017-09-28 15:55:01 +01001209 smmu_domain->tlb_ops->tlb_sync(smmu_domain);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301210 arm_smmu_rpm_put(smmu);
1211 }
Robin Murphy32b12442017-09-28 15:55:01 +01001212}
1213
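/*
 * Resolve an IOVA using the hardware's address translation operations rather
 * than a software page-table walk: write the VA to ATS1PR, poll ATSR until
 * the translation completes, then read the result back from PAR. A timeout
 * falls back to the software walk; a fault reported in PAR is treated as an
 * error.
 */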
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001214static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
1215 dma_addr_t iova)
1216{
Joerg Roedel1d672632015-03-26 13:43:10 +01001217 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001218 struct arm_smmu_device *smmu = smmu_domain->smmu;
1219 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
 1220	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1221 struct device *dev = smmu->dev;
Robin Murphy19713fd2019-08-15 19:37:30 +01001222 void __iomem *reg;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001223 u32 tmp;
1224 u64 phys;
Robin Murphy523d7422017-06-22 16:53:56 +01001225 unsigned long va, flags;
Robin Murphy19713fd2019-08-15 19:37:30 +01001226 int ret, idx = cfg->cbndx;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301227
1228 ret = arm_smmu_rpm_get(smmu);
1229 if (ret < 0)
1230 return 0;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001231
Robin Murphy523d7422017-06-22 16:53:56 +01001232 spin_lock_irqsave(&smmu_domain->cb_lock, flags);
Robin Murphy661d9622015-05-27 17:09:34 +01001233 va = iova & ~0xfffUL;
Robin Murphy61005762019-08-15 19:37:28 +01001234 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
Robin Murphy19713fd2019-08-15 19:37:30 +01001235 arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
Robin Murphy61005762019-08-15 19:37:28 +01001236 else
Robin Murphy19713fd2019-08-15 19:37:30 +01001237 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001238
Robin Murphy19713fd2019-08-15 19:37:30 +01001239 reg = arm_smmu_page(smmu, ARM_SMMU_CB(smmu, idx)) + ARM_SMMU_CB_ATSR;
1240 if (readl_poll_timeout_atomic(reg, tmp, !(tmp & ATSR_ACTIVE), 5, 50)) {
Robin Murphy523d7422017-06-22 16:53:56 +01001241 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001242 dev_err(dev,
Fabio Estevam077124c2015-08-18 17:12:24 +01001243 "iova to phys timed out on %pad. Falling back to software table walk.\n",
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001244 &iova);
		arm_smmu_rpm_put(smmu);
 1245		return ops->iova_to_phys(ops, iova);
1246 }
1247
Robin Murphy19713fd2019-08-15 19:37:30 +01001248 phys = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_PAR);
Robin Murphy523d7422017-06-22 16:53:56 +01001249 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001250 if (phys & CB_PAR_F) {
1251 dev_err(dev, "translation fault!\n");
1252 dev_err(dev, "PAR = 0x%llx\n", phys);
		arm_smmu_rpm_put(smmu);
 1253		return 0;
1254 }
1255
Sricharan Rd4a44f02018-12-04 11:52:10 +05301256 arm_smmu_rpm_put(smmu);
1257
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001258 return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
1259}
1260
Will Deacon45ae7cf2013-06-24 18:31:25 +01001261static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001262 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001263{
Joerg Roedel1d672632015-03-26 13:43:10 +01001264 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Robin Murphy523d7422017-06-22 16:53:56 +01001265 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001266
Sunil Gouthambdf95922017-04-25 15:27:52 +05301267 if (domain->type == IOMMU_DOMAIN_IDENTITY)
1268 return iova;
1269
Will Deacon518f7132014-11-14 17:17:54 +00001270 if (!ops)
Will Deacona44a97912013-11-07 18:47:50 +00001271 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001272
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001273 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
Robin Murphy523d7422017-06-22 16:53:56 +01001274 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
1275 return arm_smmu_iova_to_phys_hard(domain, iova);
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001276
Robin Murphy523d7422017-06-22 16:53:56 +01001277 return ops->iova_to_phys(ops, iova);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001278}
1279
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001280static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001281{
Will Deacond0948942014-06-24 17:30:10 +01001282 switch (cap) {
1283 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001284 /*
1285 * Return true here as the SMMU can always send out coherent
1286 * requests.
1287 */
1288 return true;
Antonios Motakis0029a8d2014-10-13 14:06:18 +01001289 case IOMMU_CAP_NOEXEC:
1290 return true;
Will Deacond0948942014-06-24 17:30:10 +01001291 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001292 return false;
Will Deacond0948942014-06-24 17:30:10 +01001293 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001294}
Will Deacon45ae7cf2013-06-24 18:31:25 +01001295
Suzuki K Poulose92ce7e82019-06-14 18:54:00 +01001296static int arm_smmu_match_node(struct device *dev, const void *data)
Robin Murphy021bb842016-09-14 15:26:46 +01001297{
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001298 return dev->fwnode == data;
Robin Murphy021bb842016-09-14 15:26:46 +01001299}
1300
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001301static
1302struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
Robin Murphy021bb842016-09-14 15:26:46 +01001303{
1304 struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001305 fwnode, arm_smmu_match_node);
Robin Murphy021bb842016-09-14 15:26:46 +01001306 put_device(dev);
1307 return dev ? dev_get_drvdata(dev) : NULL;
1308}
1309
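/*
 * Each entry in fwspec->ids carries a stream ID and an optional SMR mask,
 * packed by arm_smmu_of_xlate() (or by the legacy "mmu-masters" path).
 * add_device() below unpacks them with FIELD_GET() and validates them against
 * the limits probed from the hardware before allocating stream-map entries.
 */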
Will Deacon03edb222015-01-19 14:27:33 +00001310static int arm_smmu_add_device(struct device *dev)
1311{
Robin Murphyadfec2e2016-09-12 17:13:55 +01001312 struct arm_smmu_device *smmu;
Robin Murphyf80cd882016-09-14 15:21:39 +01001313 struct arm_smmu_master_cfg *cfg;
Joerg Roedel9b468f72018-11-29 14:01:00 +01001314 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphyf80cd882016-09-14 15:21:39 +01001315 int i, ret;
1316
Robin Murphy021bb842016-09-14 15:26:46 +01001317 if (using_legacy_binding) {
1318 ret = arm_smmu_register_legacy_master(dev, &smmu);
Artem Savkova7990c62017-08-08 12:26:02 +02001319
1320 /*
 1321	 * If dev->iommu_fwspec is initially NULL, arm_smmu_register_legacy_master()
1322 * will allocate/initialise a new one. Thus we need to update fwspec for
1323 * later use.
1324 */
Joerg Roedel9b468f72018-11-29 14:01:00 +01001325 fwspec = dev_iommu_fwspec_get(dev);
Robin Murphy021bb842016-09-14 15:26:46 +01001326 if (ret)
1327 goto out_free;
Robin Murphy3c117b52016-11-02 17:31:32 +00001328 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001329 smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
Robin Murphy021bb842016-09-14 15:26:46 +01001330 } else {
1331 return -ENODEV;
1332 }
Robin Murphyf80cd882016-09-14 15:21:39 +01001333
1334 ret = -EINVAL;
Robin Murphyadfec2e2016-09-12 17:13:55 +01001335 for (i = 0; i < fwspec->num_ids; i++) {
Robin Murphy0caf5f42019-08-15 19:37:23 +01001336 u16 sid = FIELD_GET(SMR_ID, fwspec->ids[i]);
1337 u16 mask = FIELD_GET(SMR_MASK, fwspec->ids[i]);
Robin Murphyf80cd882016-09-14 15:21:39 +01001338
Robin Murphyadfec2e2016-09-12 17:13:55 +01001339 if (sid & ~smmu->streamid_mask) {
Robin Murphyf80cd882016-09-14 15:21:39 +01001340 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
Robin Murphy021bb842016-09-14 15:26:46 +01001341 sid, smmu->streamid_mask);
1342 goto out_free;
1343 }
1344 if (mask & ~smmu->smr_mask_mask) {
1345 dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
Peng Fan6323f472017-04-21 17:03:36 +08001346 mask, smmu->smr_mask_mask);
Robin Murphyf80cd882016-09-14 15:21:39 +01001347 goto out_free;
1348 }
Robin Murphyf80cd882016-09-14 15:21:39 +01001349 }
Will Deacon03edb222015-01-19 14:27:33 +00001350
Robin Murphyadfec2e2016-09-12 17:13:55 +01001351 ret = -ENOMEM;
1352 cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
1353 GFP_KERNEL);
1354 if (!cfg)
1355 goto out_free;
1356
1357 cfg->smmu = smmu;
1358 fwspec->iommu_priv = cfg;
1359 while (i--)
1360 cfg->smendx[i] = INVALID_SMENDX;
1361
Sricharan Rd4a44f02018-12-04 11:52:10 +05301362 ret = arm_smmu_rpm_get(smmu);
1363 if (ret < 0)
1364 goto out_cfg_free;
1365
Robin Murphy588888a2016-09-12 17:13:54 +01001366 ret = arm_smmu_master_alloc_smes(dev);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301367 arm_smmu_rpm_put(smmu);
1368
Robin Murphyadfec2e2016-09-12 17:13:55 +01001369 if (ret)
Vivek Gautamc54451a2017-07-06 15:07:00 +05301370 goto out_cfg_free;
Robin Murphyadfec2e2016-09-12 17:13:55 +01001371
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001372 iommu_device_link(&smmu->iommu, dev);
1373
Sricharan R655e3642018-12-04 11:52:11 +05301374 device_link_add(dev, smmu->dev,
1375 DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER);
1376
Robin Murphyadfec2e2016-09-12 17:13:55 +01001377 return 0;
Robin Murphyf80cd882016-09-14 15:21:39 +01001378
Vivek Gautamc54451a2017-07-06 15:07:00 +05301379out_cfg_free:
1380 kfree(cfg);
Robin Murphyf80cd882016-09-14 15:21:39 +01001381out_free:
Robin Murphyadfec2e2016-09-12 17:13:55 +01001382 iommu_fwspec_free(dev);
Robin Murphyf80cd882016-09-14 15:21:39 +01001383 return ret;
Will Deacon03edb222015-01-19 14:27:33 +00001384}
1385
Will Deacon45ae7cf2013-06-24 18:31:25 +01001386static void arm_smmu_remove_device(struct device *dev)
1387{
Joerg Roedel9b468f72018-11-29 14:01:00 +01001388 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001389 struct arm_smmu_master_cfg *cfg;
1390 struct arm_smmu_device *smmu;
Sricharan Rd4a44f02018-12-04 11:52:10 +05301391 int ret;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001392
Robin Murphyadfec2e2016-09-12 17:13:55 +01001393 if (!fwspec || fwspec->ops != &arm_smmu_ops)
Robin Murphyf80cd882016-09-14 15:21:39 +01001394 return;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001395
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001396 cfg = fwspec->iommu_priv;
1397 smmu = cfg->smmu;
1398
Sricharan Rd4a44f02018-12-04 11:52:10 +05301399 ret = arm_smmu_rpm_get(smmu);
1400 if (ret < 0)
1401 return;
1402
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001403 iommu_device_unlink(&smmu->iommu, dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001404 arm_smmu_master_free_smes(fwspec);
Sricharan Rd4a44f02018-12-04 11:52:10 +05301405
1406 arm_smmu_rpm_put(smmu);
1407
Antonios Motakis5fc63a72013-10-18 16:08:29 +01001408 iommu_group_remove_device(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001409 kfree(fwspec->iommu_priv);
1410 iommu_fwspec_free(dev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001411}
1412
Joerg Roedelaf659932015-10-21 23:51:41 +02001413static struct iommu_group *arm_smmu_device_group(struct device *dev)
1414{
Joerg Roedel9b468f72018-11-29 14:01:00 +01001415 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001416 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
Robin Murphy588888a2016-09-12 17:13:54 +01001417 struct iommu_group *group = NULL;
1418 int i, idx;
1419
Robin Murphyadfec2e2016-09-12 17:13:55 +01001420 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy588888a2016-09-12 17:13:54 +01001421 if (group && smmu->s2crs[idx].group &&
1422 group != smmu->s2crs[idx].group)
1423 return ERR_PTR(-EINVAL);
1424
1425 group = smmu->s2crs[idx].group;
1426 }
1427
1428 if (group)
Robin Murphye1b44cb2016-11-11 17:59:22 +00001429 return iommu_group_ref_get(group);
Joerg Roedelaf659932015-10-21 23:51:41 +02001430
1431 if (dev_is_pci(dev))
1432 group = pci_device_group(dev);
Nipun Guptaeab03e22018-09-10 19:19:18 +05301433 else if (dev_is_fsl_mc(dev))
1434 group = fsl_mc_device_group(dev);
Joerg Roedelaf659932015-10-21 23:51:41 +02001435 else
1436 group = generic_device_group(dev);
1437
Joerg Roedelaf659932015-10-21 23:51:41 +02001438 return group;
1439}
1440
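/*
 * Domain attributes are negotiated through the generic API; for instance, a
 * caller wanting nested (stage-1 over stage-2) translation would do something
 * like the sketch below before attaching any device (hypothetical usage, not
 * taken from this file):
 *
 *	int nesting = 1;
 *
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_NESTING, &nesting);
 */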
Will Deaconc752ce42014-06-25 22:46:31 +01001441static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
1442 enum iommu_attr attr, void *data)
1443{
Joerg Roedel1d672632015-03-26 13:43:10 +01001444 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001445
Robin Murphy44f68762018-09-20 17:10:27 +01001446	switch (domain->type) {
1447 case IOMMU_DOMAIN_UNMANAGED:
1448 switch (attr) {
1449 case DOMAIN_ATTR_NESTING:
1450 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
1451 return 0;
1452 default:
1453 return -ENODEV;
1454 }
1455 break;
1456 case IOMMU_DOMAIN_DMA:
1457 switch (attr) {
1458 case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
1459 *(int *)data = smmu_domain->non_strict;
1460 return 0;
1461 default:
1462 return -ENODEV;
1463 }
1464 break;
Will Deaconc752ce42014-06-25 22:46:31 +01001465 default:
Robin Murphy44f68762018-09-20 17:10:27 +01001466 return -EINVAL;
Will Deaconc752ce42014-06-25 22:46:31 +01001467 }
1468}
1469
1470static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
1471 enum iommu_attr attr, void *data)
1472{
Will Deacon518f7132014-11-14 17:17:54 +00001473 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01001474 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001475
Will Deacon518f7132014-11-14 17:17:54 +00001476 mutex_lock(&smmu_domain->init_mutex);
1477
Robin Murphy44f68762018-09-20 17:10:27 +01001478	switch (domain->type) {
1479 case IOMMU_DOMAIN_UNMANAGED:
1480 switch (attr) {
1481 case DOMAIN_ATTR_NESTING:
1482 if (smmu_domain->smmu) {
1483 ret = -EPERM;
1484 goto out_unlock;
1485 }
1486
1487 if (*(int *)data)
1488 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1489 else
1490 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1491 break;
1492 default:
1493 ret = -ENODEV;
Will Deacon518f7132014-11-14 17:17:54 +00001494 }
Robin Murphy44f68762018-09-20 17:10:27 +01001495 break;
1496 case IOMMU_DOMAIN_DMA:
1497 switch (attr) {
1498 case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
1499 smmu_domain->non_strict = *(int *)data;
1500 break;
1501 default:
1502 ret = -ENODEV;
1503 }
Will Deacon518f7132014-11-14 17:17:54 +00001504 break;
Will Deaconc752ce42014-06-25 22:46:31 +01001505 default:
Robin Murphy44f68762018-09-20 17:10:27 +01001506 ret = -EINVAL;
Will Deaconc752ce42014-06-25 22:46:31 +01001507 }
Will Deacon518f7132014-11-14 17:17:54 +00001508out_unlock:
1509 mutex_unlock(&smmu_domain->init_mutex);
1510 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01001511}
1512
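/*
 * Translate the cells of a device's "iommus" phandle entry into a packed
 * stream ID / SMR mask pair for the fwspec. With the generic binding the
 * first cell is the stream ID and an optional second cell is the SMR mask,
 * so a consumer node might look roughly like this (illustrative example
 * only):
 *
 *	master@... {
 *		...
 *		iommus = <&smmu 0x400 0x3f>;
 *	};
 */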
Robin Murphy021bb842016-09-14 15:26:46 +01001513static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
1514{
Robin Murphy56fbf602017-03-31 12:03:33 +01001515 u32 mask, fwid = 0;
Robin Murphy021bb842016-09-14 15:26:46 +01001516
1517 if (args->args_count > 0)
Robin Murphy0caf5f42019-08-15 19:37:23 +01001518 fwid |= FIELD_PREP(SMR_ID, args->args[0]);
Robin Murphy021bb842016-09-14 15:26:46 +01001519
1520 if (args->args_count > 1)
Robin Murphy0caf5f42019-08-15 19:37:23 +01001521 fwid |= FIELD_PREP(SMR_MASK, args->args[1]);
Robin Murphy56fbf602017-03-31 12:03:33 +01001522 else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
Robin Murphy0caf5f42019-08-15 19:37:23 +01001523 fwid |= FIELD_PREP(SMR_MASK, mask);
Robin Murphy021bb842016-09-14 15:26:46 +01001524
1525 return iommu_fwspec_add_ids(dev, &fwid, 1);
1526}
1527
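/*
 * Reserve a software-managed MSI window (MSI_IOVA_BASE/MSI_IOVA_LENGTH) and
 * let iommu_dma_get_resv_regions() add any further ranges, so that the DMA
 * layer keeps those IOVAs away from its allocator.
 */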
Eric Augerf3ebee82017-01-19 20:57:55 +00001528static void arm_smmu_get_resv_regions(struct device *dev,
1529 struct list_head *head)
1530{
1531 struct iommu_resv_region *region;
1532 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
1533
1534 region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
Robin Murphy9d3a4de2017-03-16 17:00:16 +00001535 prot, IOMMU_RESV_SW_MSI);
Eric Augerf3ebee82017-01-19 20:57:55 +00001536 if (!region)
1537 return;
1538
1539 list_add_tail(&region->list, head);
Robin Murphy273df962017-03-16 17:00:19 +00001540
1541 iommu_dma_get_resv_regions(dev, head);
Eric Augerf3ebee82017-01-19 20:57:55 +00001542}
1543
1544static void arm_smmu_put_resv_regions(struct device *dev,
1545 struct list_head *head)
1546{
1547 struct iommu_resv_region *entry, *next;
1548
1549 list_for_each_entry_safe(entry, next, head, list)
1550 kfree(entry);
1551}
1552
Will Deacon518f7132014-11-14 17:17:54 +00001553static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01001554 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01001555 .domain_alloc = arm_smmu_domain_alloc,
1556 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01001557 .attach_dev = arm_smmu_attach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01001558 .map = arm_smmu_map,
1559 .unmap = arm_smmu_unmap,
Robin Murphy44f68762018-09-20 17:10:27 +01001560 .flush_iotlb_all = arm_smmu_flush_iotlb_all,
Robin Murphy32b12442017-09-28 15:55:01 +01001561 .iotlb_sync = arm_smmu_iotlb_sync,
Will Deaconc752ce42014-06-25 22:46:31 +01001562 .iova_to_phys = arm_smmu_iova_to_phys,
1563 .add_device = arm_smmu_add_device,
1564 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02001565 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01001566 .domain_get_attr = arm_smmu_domain_get_attr,
1567 .domain_set_attr = arm_smmu_domain_set_attr,
Robin Murphy021bb842016-09-14 15:26:46 +01001568 .of_xlate = arm_smmu_of_xlate,
Eric Augerf3ebee82017-01-19 20:57:55 +00001569 .get_resv_regions = arm_smmu_get_resv_regions,
1570 .put_resv_regions = arm_smmu_put_resv_regions,
Will Deacon518f7132014-11-14 17:17:54 +00001571 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001572};
1573
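/*
 * Bring the SMMU to a known state: clear the global fault status, reprogram
 * every stream-map entry and context bank from the software copies, apply the
 * MMU-500 ACR/ACTLR errata workarounds, invalidate the TLBs, and finally
 * write sCR0 with fault reporting enabled and unmatched streams either
 * bypassed or faulted according to the disable_bypass parameter.
 */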
1574static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
1575{
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001576 int i;
Peng Fan3ca37122016-05-03 21:50:30 +08001577 u32 reg, major;
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001578
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001579 /* clear global FSR */
Robin Murphy00320ce2019-08-15 19:37:31 +01001580 reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
1581 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, reg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001582
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001583 /*
1584 * Reset stream mapping groups: Initial values mark all SMRn as
1585 * invalid and all S2CRn as bypass unless overridden.
1586 */
Robin Murphy8e8b2032016-09-12 17:13:50 +01001587 for (i = 0; i < smmu->num_mapping_groups; ++i)
1588 arm_smmu_write_sme(smmu, i);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001589
Nipun Gupta6eb18d42016-11-04 15:25:23 +05301590 if (smmu->model == ARM_MMU500) {
1591 /*
1592 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
1593 * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
1594 * bit is only present in MMU-500r2 onwards.
1595 */
Robin Murphy00320ce2019-08-15 19:37:31 +01001596 reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID7);
Robin Murphy0caf5f42019-08-15 19:37:23 +01001597 major = FIELD_GET(ID7_MAJOR, reg);
Robin Murphy00320ce2019-08-15 19:37:31 +01001598 reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sACR);
Nipun Gupta6eb18d42016-11-04 15:25:23 +05301599 if (major >= 2)
1600 reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
1601 /*
1602 * Allow unmatched Stream IDs to allocate bypass
1603 * TLB entries for reduced latency.
1604 */
Feng Kan74f55d32017-10-11 15:08:39 -07001605 reg |= ARM_MMU500_ACR_SMTNMB_TLBEN | ARM_MMU500_ACR_S2CRB_TLBEN;
Robin Murphy00320ce2019-08-15 19:37:31 +01001606 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sACR, reg);
Peng Fan3ca37122016-05-03 21:50:30 +08001607 }
1608
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001609 /* Make sure all context banks are disabled and clear CB_FSR */
1610 for (i = 0; i < smmu->num_context_banks; ++i) {
Robin Murphy90df3732017-08-08 14:56:14 +01001611 arm_smmu_write_context_bank(smmu, i);
Robin Murphy19713fd2019-08-15 19:37:30 +01001612 arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_FSR, FSR_FAULT);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01001613 /*
1614 * Disable MMU-500's not-particularly-beneficial next-page
1615 * prefetcher for the sake of errata #841119 and #826419.
1616 */
1617 if (smmu->model == ARM_MMU500) {
Robin Murphy19713fd2019-08-15 19:37:30 +01001618 reg = arm_smmu_cb_read(smmu, i, ARM_SMMU_CB_ACTLR);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01001619 reg &= ~ARM_MMU500_ACTLR_CPRE;
Robin Murphy19713fd2019-08-15 19:37:30 +01001620 arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_ACTLR, reg);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01001621 }
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001622 }
Will Deacon1463fe42013-07-31 19:21:27 +01001623
Will Deacon45ae7cf2013-06-24 18:31:25 +01001624 /* Invalidate the TLB, just in case */
Robin Murphy00320ce2019-08-15 19:37:31 +01001625 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLH, QCOM_DUMMY_VAL);
1626 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLNSNH, QCOM_DUMMY_VAL);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001627
Robin Murphy00320ce2019-08-15 19:37:31 +01001628 reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001629
Will Deacon45ae7cf2013-06-24 18:31:25 +01001630 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001631 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001632
1633 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001634 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001635
Robin Murphy25a1c962016-02-10 14:25:33 +00001636 /* Enable client access, handling unmatched streams as appropriate */
1637 reg &= ~sCR0_CLIENTPD;
1638 if (disable_bypass)
1639 reg |= sCR0_USFCFG;
1640 else
1641 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001642
1643 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001644 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001645
1646 /* Don't upgrade barriers */
Robin Murphy0caf5f42019-08-15 19:37:23 +01001647 reg &= ~(sCR0_BSU);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001648
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001649 if (smmu->features & ARM_SMMU_FEAT_VMID16)
1650 reg |= sCR0_VMID16EN;
1651
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001652 if (smmu->features & ARM_SMMU_FEAT_EXIDS)
1653 reg |= sCR0_EXIDENABLE;
1654
Will Deacon45ae7cf2013-06-24 18:31:25 +01001655 /* Push the button */
Robin Murphy11febfc2017-03-30 17:56:31 +01001656 arm_smmu_tlb_sync_global(smmu);
Robin Murphy00320ce2019-08-15 19:37:31 +01001657 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, reg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001658}
1659
1660static int arm_smmu_id_size_to_bits(int size)
1661{
1662 switch (size) {
1663 case 0:
1664 return 32;
1665 case 1:
1666 return 36;
1667 case 2:
1668 return 40;
1669 case 3:
1670 return 42;
1671 case 4:
1672 return 44;
1673 case 5:
1674 default:
1675 return 48;
1676 }
1677}
1678
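/*
 * Decode the ID0/ID1/ID2 registers into driver state: supported translation
 * stages and formats, stream matching resources, context bank counts, page
 * sizes and address sizes. Any implementation-specific adjustments are
 * handled by impl->cfg_probe() at the end.
 */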
1679static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1680{
Robin Murphy490325e2019-08-15 19:37:26 +01001681 unsigned int size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001682 u32 id;
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001683 bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001684 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001685
1686 dev_notice(smmu->dev, "probing hardware configuration...\n");
Robin Murphyb7862e32016-04-13 18:13:03 +01001687 dev_notice(smmu->dev, "SMMUv%d with:\n",
1688 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001689
1690 /* ID0 */
Robin Murphy00320ce2019-08-15 19:37:31 +01001691 id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01001692
1693 /* Restrict available stages based on module parameter */
1694 if (force_stage == 1)
1695 id &= ~(ID0_S2TS | ID0_NTS);
1696 else if (force_stage == 2)
1697 id &= ~(ID0_S1TS | ID0_NTS);
1698
Will Deacon45ae7cf2013-06-24 18:31:25 +01001699 if (id & ID0_S1TS) {
1700 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
1701 dev_notice(smmu->dev, "\tstage 1 translation\n");
1702 }
1703
1704 if (id & ID0_S2TS) {
1705 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
1706 dev_notice(smmu->dev, "\tstage 2 translation\n");
1707 }
1708
1709 if (id & ID0_NTS) {
1710 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
1711 dev_notice(smmu->dev, "\tnested translation\n");
1712 }
1713
1714 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01001715 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001716 dev_err(smmu->dev, "\tno translation support!\n");
1717 return -ENODEV;
1718 }
1719
Robin Murphyb7862e32016-04-13 18:13:03 +01001720 if ((id & ID0_S1TS) &&
1721 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001722 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
1723 dev_notice(smmu->dev, "\taddress translation ops\n");
1724 }
1725
Robin Murphybae2c2d2015-07-29 19:46:05 +01001726 /*
1727 * In order for DMA API calls to work properly, we must defer to what
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001728 * the FW says about coherency, regardless of what the hardware claims.
Robin Murphybae2c2d2015-07-29 19:46:05 +01001729 * Fortunately, this also opens up a workaround for systems where the
1730 * ID register value has ended up configured incorrectly.
1731 */
Robin Murphybae2c2d2015-07-29 19:46:05 +01001732 cttw_reg = !!(id & ID0_CTTW);
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001733 if (cttw_fw || cttw_reg)
Robin Murphybae2c2d2015-07-29 19:46:05 +01001734 dev_notice(smmu->dev, "\t%scoherent table walk\n",
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001735 cttw_fw ? "" : "non-");
1736 if (cttw_fw != cttw_reg)
Robin Murphybae2c2d2015-07-29 19:46:05 +01001737 dev_notice(smmu->dev,
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001738 "\t(IDR0.CTTW overridden by FW configuration)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01001739
Robin Murphy21174242016-09-12 17:13:48 +01001740 /* Max. number of entries we have for stream matching/indexing */
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001741 if (smmu->version == ARM_SMMU_V2 && id & ID0_EXIDS) {
1742 smmu->features |= ARM_SMMU_FEAT_EXIDS;
1743 size = 1 << 16;
1744 } else {
Robin Murphy0caf5f42019-08-15 19:37:23 +01001745 size = 1 << FIELD_GET(ID0_NUMSIDB, id);
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001746 }
Robin Murphy21174242016-09-12 17:13:48 +01001747 smmu->streamid_mask = size - 1;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001748 if (id & ID0_SMS) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001749 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
Robin Murphy0caf5f42019-08-15 19:37:23 +01001750 size = FIELD_GET(ID0_NUMSMRG, id);
Robin Murphy21174242016-09-12 17:13:48 +01001751 if (size == 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001752 dev_err(smmu->dev,
1753 "stream-matching supported, but no SMRs present!\n");
1754 return -ENODEV;
1755 }
1756
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001757 /* Zero-initialised to mark as invalid */
1758 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
1759 GFP_KERNEL);
1760 if (!smmu->smrs)
1761 return -ENOMEM;
1762
Will Deacon45ae7cf2013-06-24 18:31:25 +01001763 dev_notice(smmu->dev,
Robin Murphy490325e2019-08-15 19:37:26 +01001764		   "\tstream matching with %u register groups\n", size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001765 }
Robin Murphy8e8b2032016-09-12 17:13:50 +01001766 /* s2cr->type == 0 means translation, so initialise explicitly */
1767 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
1768 GFP_KERNEL);
1769 if (!smmu->s2crs)
1770 return -ENOMEM;
1771 for (i = 0; i < size; i++)
1772 smmu->s2crs[i] = s2cr_init_val;
1773
Robin Murphy21174242016-09-12 17:13:48 +01001774 smmu->num_mapping_groups = size;
Robin Murphy588888a2016-09-12 17:13:54 +01001775 mutex_init(&smmu->stream_map_mutex);
Will Deacon8e517e72017-07-06 15:55:48 +01001776 spin_lock_init(&smmu->global_sync_lock);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001777
Robin Murphy7602b872016-04-28 17:12:09 +01001778 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
1779 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
1780 if (!(id & ID0_PTFS_NO_AARCH32S))
1781 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
1782 }
1783
Will Deacon45ae7cf2013-06-24 18:31:25 +01001784 /* ID1 */
Robin Murphy00320ce2019-08-15 19:37:31 +01001785 id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01001786 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001787
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01001788 /* Check for size mismatch of SMMU address space from mapped region */
Robin Murphy0caf5f42019-08-15 19:37:23 +01001789 size = 1 << (FIELD_GET(ID1_NUMPAGENDXB, id) + 1);
Robin Murphy490325e2019-08-15 19:37:26 +01001790 if (smmu->numpage != 2 * size << smmu->pgshift)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001791 dev_warn(smmu->dev,
Robin Murphy490325e2019-08-15 19:37:26 +01001792 "SMMU address space size (0x%x) differs from mapped region size (0x%x)!\n",
1793 2 * size << smmu->pgshift, smmu->numpage);
1794 /* Now properly encode NUMPAGE to subsequently derive SMMU_CB_BASE */
1795 smmu->numpage = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001796
Robin Murphy0caf5f42019-08-15 19:37:23 +01001797 smmu->num_s2_context_banks = FIELD_GET(ID1_NUMS2CB, id);
1798 smmu->num_context_banks = FIELD_GET(ID1_NUMCB, id);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001799 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
1800 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
1801 return -ENODEV;
1802 }
1803 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
1804 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphy90df3732017-08-08 14:56:14 +01001805 smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks,
1806 sizeof(*smmu->cbs), GFP_KERNEL);
1807 if (!smmu->cbs)
1808 return -ENOMEM;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001809
1810 /* ID2 */
Robin Murphy00320ce2019-08-15 19:37:31 +01001811 id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID2);
Robin Murphy0caf5f42019-08-15 19:37:23 +01001812 size = arm_smmu_id_size_to_bits(FIELD_GET(ID2_IAS, id));
Will Deacon518f7132014-11-14 17:17:54 +00001813 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001814
Will Deacon518f7132014-11-14 17:17:54 +00001815 /* The output mask is also applied for bypass */
Robin Murphy0caf5f42019-08-15 19:37:23 +01001816 size = arm_smmu_id_size_to_bits(FIELD_GET(ID2_OAS, id));
Will Deacon518f7132014-11-14 17:17:54 +00001817 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001818
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001819 if (id & ID2_VMID16)
1820 smmu->features |= ARM_SMMU_FEAT_VMID16;
1821
Robin Murphyf1d84542015-03-04 16:41:05 +00001822 /*
1823 * What the page table walker can address actually depends on which
1824 * descriptor format is in use, but since a) we don't know that yet,
1825 * and b) it can vary per context bank, this will have to do...
1826 */
1827 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
1828 dev_warn(smmu->dev,
1829 "failed to set DMA mask for table walker\n");
1830
Robin Murphyb7862e32016-04-13 18:13:03 +01001831 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00001832 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01001833 if (smmu->version == ARM_SMMU_V1_64K)
1834 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001835 } else {
Robin Murphy0caf5f42019-08-15 19:37:23 +01001836 size = FIELD_GET(ID2_UBS, id);
Will Deacon518f7132014-11-14 17:17:54 +00001837 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00001838 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01001839 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00001840 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01001841 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00001842 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01001843 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001844 }
1845
Robin Murphy7602b872016-04-28 17:12:09 +01001846 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01001847 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01001848 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01001849 if (smmu->features &
1850 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01001851 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01001852 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01001853 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01001854 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01001855 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01001856
Robin Murphyd5466352016-05-09 17:20:09 +01001857 if (arm_smmu_ops.pgsize_bitmap == -1UL)
1858 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
1859 else
1860 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
1861 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
1862 smmu->pgsize_bitmap);
1863
Will Deacon518f7132014-11-14 17:17:54 +00001864
Will Deacon28d60072014-09-01 16:24:48 +01001865 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
1866 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
Will Deacon518f7132014-11-14 17:17:54 +00001867 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01001868
1869 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
1870 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
Will Deacon518f7132014-11-14 17:17:54 +00001871 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01001872
Robin Murphy3995e182019-08-15 19:37:35 +01001873 if (smmu->impl && smmu->impl->cfg_probe)
1874 return smmu->impl->cfg_probe(smmu);
1875
Will Deacon45ae7cf2013-06-24 18:31:25 +01001876 return 0;
1877}
1878
Robin Murphy67b65a32016-04-13 18:12:57 +01001879struct arm_smmu_match_data {
1880 enum arm_smmu_arch_version version;
1881 enum arm_smmu_implementation model;
1882};
1883
1884#define ARM_SMMU_MATCH_DATA(name, ver, imp) \
Sricharan R96a299d2018-12-04 11:52:09 +05301885static const struct arm_smmu_match_data name = { .version = ver, .model = imp }
Robin Murphy67b65a32016-04-13 18:12:57 +01001886
1887ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
1888ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
Robin Murphyb7862e32016-04-13 18:13:03 +01001889ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01001890ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
Robin Murphye086d912016-04-13 18:12:58 +01001891ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
Vivek Gautam89cddc52018-12-04 11:52:13 +05301892ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2);
Robin Murphy67b65a32016-04-13 18:12:57 +01001893
Joerg Roedel09b52692014-10-02 12:24:45 +02001894static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01001895 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
1896 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
1897 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01001898 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01001899 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01001900 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Vivek Gautam89cddc52018-12-04 11:52:13 +05301901 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
Robin Murphy09360402014-08-28 17:51:59 +01001902 { },
1903};
Robin Murphy09360402014-08-28 17:51:59 +01001904
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00001905#ifdef CONFIG_ACPI
1906static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
1907{
1908 int ret = 0;
1909
1910 switch (model) {
1911 case ACPI_IORT_SMMU_V1:
1912 case ACPI_IORT_SMMU_CORELINK_MMU400:
1913 smmu->version = ARM_SMMU_V1;
1914 smmu->model = GENERIC_SMMU;
1915 break;
Robin Murphy84c24372017-06-19 16:41:56 +01001916 case ACPI_IORT_SMMU_CORELINK_MMU401:
1917 smmu->version = ARM_SMMU_V1_64K;
1918 smmu->model = GENERIC_SMMU;
1919 break;
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00001920 case ACPI_IORT_SMMU_V2:
1921 smmu->version = ARM_SMMU_V2;
1922 smmu->model = GENERIC_SMMU;
1923 break;
1924 case ACPI_IORT_SMMU_CORELINK_MMU500:
1925 smmu->version = ARM_SMMU_V2;
1926 smmu->model = ARM_MMU500;
1927 break;
Robin Murphy84c24372017-06-19 16:41:56 +01001928 case ACPI_IORT_SMMU_CAVIUM_THUNDERX:
1929 smmu->version = ARM_SMMU_V2;
1930 smmu->model = CAVIUM_SMMUV2;
1931 break;
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00001932 default:
1933 ret = -ENODEV;
1934 }
1935
1936 return ret;
1937}
1938
1939static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
1940 struct arm_smmu_device *smmu)
1941{
1942 struct device *dev = smmu->dev;
1943 struct acpi_iort_node *node =
1944 *(struct acpi_iort_node **)dev_get_platdata(dev);
1945 struct acpi_iort_smmu *iort_smmu;
1946 int ret;
1947
1948 /* Retrieve SMMU1/2 specific data */
1949 iort_smmu = (struct acpi_iort_smmu *)node->node_data;
1950
1951 ret = acpi_smmu_get_data(iort_smmu->model, smmu);
1952 if (ret < 0)
1953 return ret;
1954
1955 /* Ignore the configuration access interrupt */
1956 smmu->num_global_irqs = 1;
1957
1958 if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
1959 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
1960
1961 return 0;
1962}
1963#else
1964static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
1965 struct arm_smmu_device *smmu)
1966{
1967 return -ENODEV;
1968}
1969#endif
1970
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001971static int arm_smmu_device_dt_probe(struct platform_device *pdev,
1972 struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001973{
Robin Murphy67b65a32016-04-13 18:12:57 +01001974 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001975 struct device *dev = &pdev->dev;
Robin Murphy021bb842016-09-14 15:26:46 +01001976 bool legacy_binding;
1977
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001978 if (of_property_read_u32(dev->of_node, "#global-interrupts",
1979 &smmu->num_global_irqs)) {
1980 dev_err(dev, "missing #global-interrupts property\n");
1981 return -ENODEV;
1982 }
1983
1984 data = of_device_get_match_data(dev);
1985 smmu->version = data->version;
1986 smmu->model = data->model;
1987
Robin Murphy021bb842016-09-14 15:26:46 +01001988 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
1989 if (legacy_binding && !using_generic_binding) {
1990 if (!using_legacy_binding)
1991 pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
1992 using_legacy_binding = true;
1993 } else if (!legacy_binding && !using_legacy_binding) {
1994 using_generic_binding = true;
1995 } else {
1996 dev_err(dev, "not probing due to mismatched DT properties\n");
1997 return -ENODEV;
1998 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001999
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002000 if (of_dma_is_coherent(dev->of_node))
2001 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
2002
2003 return 0;
2004}
2005
Robin Murphyf6810c12017-04-10 16:51:05 +05302006static void arm_smmu_bus_init(void)
2007{
2008 /* Oh, for a proper bus abstraction */
2009 if (!iommu_present(&platform_bus_type))
2010 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
2011#ifdef CONFIG_ARM_AMBA
2012 if (!iommu_present(&amba_bustype))
2013 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
2014#endif
2015#ifdef CONFIG_PCI
2016 if (!iommu_present(&pci_bus_type)) {
2017 pci_request_acs();
2018 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
2019 }
2020#endif
Nipun Guptaeab03e22018-09-10 19:19:18 +05302021#ifdef CONFIG_FSL_MC_BUS
2022 if (!iommu_present(&fsl_mc_bus_type))
2023 bus_set_iommu(&fsl_mc_bus_type, &arm_smmu_ops);
2024#endif
Robin Murphyf6810c12017-04-10 16:51:05 +05302025}
2026
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002027static int arm_smmu_device_probe(struct platform_device *pdev)
2028{
2029 struct resource *res;
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002030 resource_size_t ioaddr;
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002031 struct arm_smmu_device *smmu;
2032 struct device *dev = &pdev->dev;
2033 int num_irqs, i, err;
2034
Will Deacon45ae7cf2013-06-24 18:31:25 +01002035 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
2036 if (!smmu) {
2037 dev_err(dev, "failed to allocate arm_smmu_device\n");
2038 return -ENOMEM;
2039 }
2040 smmu->dev = dev;
2041
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002042 if (dev->of_node)
2043 err = arm_smmu_device_dt_probe(pdev, smmu);
2044 else
2045 err = arm_smmu_device_acpi_probe(pdev, smmu);
2046
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002047 if (err)
2048 return err;
Robin Murphy09360402014-08-28 17:51:59 +01002049
Robin Murphyfc058d32019-08-15 19:37:33 +01002050 smmu = arm_smmu_impl_init(smmu);
2051 if (IS_ERR(smmu))
2052 return PTR_ERR(smmu);
2053
Will Deacon45ae7cf2013-06-24 18:31:25 +01002054 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002055 ioaddr = res->start;
Julia Lawall8a7f4312013-08-19 12:20:37 +01002056 smmu->base = devm_ioremap_resource(dev, res);
2057 if (IS_ERR(smmu->base))
2058 return PTR_ERR(smmu->base);
Robin Murphy490325e2019-08-15 19:37:26 +01002059 /*
2060 * The resource size should effectively match the value of SMMU_TOP;
2061 * stash that temporarily until we know PAGESIZE to validate it with.
2062 */
2063 smmu->numpage = resource_size(res);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002064
Will Deacon45ae7cf2013-06-24 18:31:25 +01002065 num_irqs = 0;
2066 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
2067 num_irqs++;
2068 if (num_irqs > smmu->num_global_irqs)
2069 smmu->num_context_irqs++;
2070 }
2071
Andreas Herrmann44a08de2013-10-01 13:39:07 +01002072 if (!smmu->num_context_irqs) {
2073 dev_err(dev, "found %d interrupts but expected at least %d\n",
2074 num_irqs, smmu->num_global_irqs + 1);
2075 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002076 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002077
Kees Cooka86854d2018-06-12 14:07:58 -07002078 smmu->irqs = devm_kcalloc(dev, num_irqs, sizeof(*smmu->irqs),
Will Deacon45ae7cf2013-06-24 18:31:25 +01002079 GFP_KERNEL);
2080 if (!smmu->irqs) {
2081 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
2082 return -ENOMEM;
2083 }
2084
2085 for (i = 0; i < num_irqs; ++i) {
2086 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07002087
Will Deacon45ae7cf2013-06-24 18:31:25 +01002088 if (irq < 0) {
2089 dev_err(dev, "failed to get irq index %d\n", i);
2090 return -ENODEV;
2091 }
2092 smmu->irqs[i] = irq;
2093 }
2094
Sricharan R96a299d2018-12-04 11:52:09 +05302095 err = devm_clk_bulk_get_all(dev, &smmu->clks);
2096 if (err < 0) {
2097 dev_err(dev, "failed to get clocks %d\n", err);
2098 return err;
2099 }
2100 smmu->num_clks = err;
2101
2102 err = clk_bulk_prepare_enable(smmu->num_clks, smmu->clks);
2103 if (err)
2104 return err;
2105
Olav Haugan3c8766d2014-08-22 17:12:32 -07002106 err = arm_smmu_device_cfg_probe(smmu);
2107 if (err)
2108 return err;
2109
Vivek Gautamd1e20222018-07-19 23:23:56 +05302110 if (smmu->version == ARM_SMMU_V2) {
2111 if (smmu->num_context_banks > smmu->num_context_irqs) {
2112 dev_err(dev,
2113 "found only %d context irq(s) but %d required\n",
2114 smmu->num_context_irqs, smmu->num_context_banks);
2115 return -ENODEV;
2116 }
2117
2118 /* Ignore superfluous interrupts */
2119 smmu->num_context_irqs = smmu->num_context_banks;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002120 }
2121
Will Deacon45ae7cf2013-06-24 18:31:25 +01002122 for (i = 0; i < smmu->num_global_irqs; ++i) {
Peng Fanbee14002016-07-04 17:38:22 +08002123 err = devm_request_irq(smmu->dev, smmu->irqs[i],
2124 arm_smmu_global_fault,
2125 IRQF_SHARED,
2126 "arm-smmu global fault",
2127 smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002128 if (err) {
2129 dev_err(dev, "failed to request global IRQ %d (%u)\n",
2130 i, smmu->irqs[i]);
Robin Murphyf80cd882016-09-14 15:21:39 +01002131 return err;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002132 }
2133 }
2134
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002135 err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
2136 "smmu.%pa", &ioaddr);
2137 if (err) {
2138 dev_err(dev, "Failed to register iommu in sysfs\n");
2139 return err;
2140 }
2141
2142 iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
2143 iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
2144
2145 err = iommu_device_register(&smmu->iommu);
2146 if (err) {
2147 dev_err(dev, "Failed to register iommu\n");
2148 return err;
2149 }
2150
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002151 platform_set_drvdata(pdev, smmu);
Will Deaconfd90cec2013-08-21 13:56:34 +01002152 arm_smmu_device_reset(smmu);
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03002153 arm_smmu_test_smr_masks(smmu);
Robin Murphy021bb842016-09-14 15:26:46 +01002154
Robin Murphyf6810c12017-04-10 16:51:05 +05302155 /*
Sricharan Rd4a44f02018-12-04 11:52:10 +05302156 * We want to avoid touching dev->power.lock in fastpaths unless
2157 * it's really going to do something useful - pm_runtime_enabled()
2158 * can serve as an ideal proxy for that decision. So, conditionally
2159 * enable pm_runtime.
2160 */
2161 if (dev->pm_domain) {
2162 pm_runtime_set_active(dev);
2163 pm_runtime_enable(dev);
2164 }
2165
2166 /*
Robin Murphyf6810c12017-04-10 16:51:05 +05302167 * For ACPI and generic DT bindings, an SMMU will be probed before
2168 * any device which might need it, so we want the bus ops in place
2169 * ready to handle default domain setup as soon as any SMMU exists.
2170 */
2171 if (!using_legacy_binding)
2172 arm_smmu_bus_init();
2173
Will Deacon45ae7cf2013-06-24 18:31:25 +01002174 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002175}
2176
Robin Murphyf6810c12017-04-10 16:51:05 +05302177/*
2178 * With the legacy DT binding in play, though, we have no guarantees about
2179 * probe order, but then we're also not doing default domains, so we can
2180 * delay setting bus ops until we're sure every possible SMMU is ready,
2181 * and that way ensure that no add_device() calls get missed.
2182 */
2183static int arm_smmu_legacy_bus_init(void)
2184{
2185 if (using_legacy_binding)
2186 arm_smmu_bus_init();
2187 return 0;
2188}
2189device_initcall_sync(arm_smmu_legacy_bus_init);
2190
Paul Gortmakeraddb672f2018-12-01 14:19:16 -05002191static void arm_smmu_device_shutdown(struct platform_device *pdev)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002192{
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002193 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002194
2195 if (!smmu)
Paul Gortmakeraddb672f2018-12-01 14:19:16 -05002196 return;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002197
Will Deaconecfadb62013-07-31 19:21:28 +01002198 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002199 dev_err(&pdev->dev, "removing device with active domains!\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01002200
Sricharan Rd4a44f02018-12-04 11:52:10 +05302201 arm_smmu_rpm_get(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002202 /* Turn the thing off */
Robin Murphy00320ce2019-08-15 19:37:31 +01002203 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, sCR0_CLIENTPD);
Sricharan Rd4a44f02018-12-04 11:52:10 +05302204 arm_smmu_rpm_put(smmu);
Sricharan R96a299d2018-12-04 11:52:09 +05302205
Sricharan Rd4a44f02018-12-04 11:52:10 +05302206 if (pm_runtime_enabled(smmu->dev))
2207 pm_runtime_force_suspend(smmu->dev);
2208 else
2209 clk_bulk_disable(smmu->num_clks, smmu->clks);
2210
2211 clk_bulk_unprepare(smmu->num_clks, smmu->clks);
Nate Watterson7aa86192017-06-29 18:18:15 -04002212}
2213
Sricharan R96a299d2018-12-04 11:52:09 +05302214static int __maybe_unused arm_smmu_runtime_resume(struct device *dev)
Robin Murphya2d866f2017-08-08 14:56:15 +01002215{
2216 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
Sricharan R96a299d2018-12-04 11:52:09 +05302217 int ret;
2218
2219 ret = clk_bulk_enable(smmu->num_clks, smmu->clks);
2220 if (ret)
2221 return ret;
Robin Murphya2d866f2017-08-08 14:56:15 +01002222
2223 arm_smmu_device_reset(smmu);
Sricharan R96a299d2018-12-04 11:52:09 +05302224
Will Deacon45ae7cf2013-06-24 18:31:25 +01002225 return 0;
2226}
2227
Sricharan R96a299d2018-12-04 11:52:09 +05302228static int __maybe_unused arm_smmu_runtime_suspend(struct device *dev)
Dan Carpenter6614ee72013-08-21 09:34:20 +01002229{
Sricharan R96a299d2018-12-04 11:52:09 +05302230 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
2231
2232 clk_bulk_disable(smmu->num_clks, smmu->clks);
2233
2234 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002235}
2236
Robin Murphya2d866f2017-08-08 14:56:15 +01002237static int __maybe_unused arm_smmu_pm_resume(struct device *dev)
2238{
Sricharan R96a299d2018-12-04 11:52:09 +05302239 if (pm_runtime_suspended(dev))
2240 return 0;
Robin Murphya2d866f2017-08-08 14:56:15 +01002241
Sricharan R96a299d2018-12-04 11:52:09 +05302242 return arm_smmu_runtime_resume(dev);
Robin Murphya2d866f2017-08-08 14:56:15 +01002243}
2244
Sricharan R96a299d2018-12-04 11:52:09 +05302245static int __maybe_unused arm_smmu_pm_suspend(struct device *dev)
2246{
2247 if (pm_runtime_suspended(dev))
2248 return 0;
2249
2250 return arm_smmu_runtime_suspend(dev);
2251}
2252
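/*
 * System sleep reuses the runtime PM callbacks: clocks are gated on suspend
 * and re-enabled on resume, with arm_smmu_device_reset() restoring register
 * state; a device that is already runtime-suspended is left untouched.
 */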
2253static const struct dev_pm_ops arm_smmu_pm_ops = {
2254 SET_SYSTEM_SLEEP_PM_OPS(arm_smmu_pm_suspend, arm_smmu_pm_resume)
2255 SET_RUNTIME_PM_OPS(arm_smmu_runtime_suspend,
2256 arm_smmu_runtime_resume, NULL)
2257};
Robin Murphya2d866f2017-08-08 14:56:15 +01002258
Will Deacon45ae7cf2013-06-24 18:31:25 +01002259static struct platform_driver arm_smmu_driver = {
2260 .driver = {
Paul Gortmakeraddb672f2018-12-01 14:19:16 -05002261 .name = "arm-smmu",
2262 .of_match_table = of_match_ptr(arm_smmu_of_match),
2263 .pm = &arm_smmu_pm_ops,
2264 .suppress_bind_attrs = true,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002265 },
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002266 .probe = arm_smmu_device_probe,
Nate Watterson7aa86192017-06-29 18:18:15 -04002267 .shutdown = arm_smmu_device_shutdown,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002268};
Paul Gortmakeraddb672f2018-12-01 14:19:16 -05002269builtin_platform_driver(arm_smmu_driver);