// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for QCOM secure IOMMUs. Somewhat based on arm-smmu.c
 *
 * Copyright (C) 2013 ARM Limited
 * Copyright (C) 2017 Red Hat
 */

#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/kconfig.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/qcom_scm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "arm-smmu.h"

#define SMMU_INTR_SEL_NS	0x2000

struct qcom_iommu_ctx;

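/*
 * One qcom_iommu_dev represents a whole secure IOMMU instance; each
 * translation context bank inside it is a child qcom_iommu_ctx device,
 * looked up by ASID (context bank index + 1) through the ctxs[] array.
 */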
struct qcom_iommu_dev {
	/* IOMMU core code handle */
	struct iommu_device	 iommu;
	struct device		*dev;
	struct clk		*iface_clk;
	struct clk		*bus_clk;
	void __iomem		*local_base;
	u32			 sec_id;
	u8			 num_ctxs;
	struct qcom_iommu_ctx	*ctxs[];	/* indexed by asid-1 */
};

struct qcom_iommu_ctx {
	struct device		*dev;
	void __iomem		*base;
	bool			 secure_init;
	u8			 asid;		/* asid and ctx bank # are 1:1 */
	struct iommu_domain	*domain;
};

struct qcom_iommu_domain {
	struct io_pgtable_ops	*pgtbl_ops;
	spinlock_t		 pgtbl_lock;
	struct mutex		 init_mutex;	/* Protects iommu pointer */
	struct iommu_domain	 domain;
	struct qcom_iommu_dev	*iommu;
	struct iommu_fwspec	*fwspec;
};

static struct qcom_iommu_domain *to_qcom_iommu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct qcom_iommu_domain, domain);
}

static const struct iommu_ops qcom_iommu_ops;

static struct qcom_iommu_dev *to_iommu(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (!fwspec || fwspec->ops != &qcom_iommu_ops)
		return NULL;

	return dev_iommu_priv_get(dev);
}

static struct qcom_iommu_ctx *to_ctx(struct qcom_iommu_domain *d, unsigned asid)
{
	struct qcom_iommu_dev *qcom_iommu = d->iommu;

	if (!qcom_iommu)
		return NULL;

	return qcom_iommu->ctxs[asid - 1];
}

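/*
 * Register accessors for a single context bank, relative to ctx->base.
 * The _relaxed MMIO variants are used throughout, as in arm-smmu.
 */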
static inline void
iommu_writel(struct qcom_iommu_ctx *ctx, unsigned reg, u32 val)
{
	writel_relaxed(val, ctx->base + reg);
}

static inline void
iommu_writeq(struct qcom_iommu_ctx *ctx, unsigned reg, u64 val)
{
	writeq_relaxed(val, ctx->base + reg);
}

static inline u32
iommu_readl(struct qcom_iommu_ctx *ctx, unsigned reg)
{
	return readl_relaxed(ctx->base + reg);
}

static inline u64
iommu_readq(struct qcom_iommu_ctx *ctx, unsigned reg)
{
	return readq_relaxed(ctx->base + reg);
}

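/*
 * TLB maintenance. A domain may span several context banks (one per
 * fwspec id), so each operation is applied to every bank and then
 * synchronized by polling TLBSTATUS until the sync completes.
 */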
static void qcom_iommu_tlb_sync(void *cookie)
{
	struct qcom_iommu_domain *qcom_domain = cookie;
	struct iommu_fwspec *fwspec = qcom_domain->fwspec;
	unsigned i;

	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
		unsigned int val, ret;

		iommu_writel(ctx, ARM_SMMU_CB_TLBSYNC, 0);

		ret = readl_poll_timeout(ctx->base + ARM_SMMU_CB_TLBSTATUS, val,
					 (val & 0x1) == 0, 0, 5000000);
		if (ret)
			dev_err(ctx->dev, "timeout waiting for TLB SYNC\n");
	}
}

static void qcom_iommu_tlb_inv_context(void *cookie)
{
	struct qcom_iommu_domain *qcom_domain = cookie;
	struct iommu_fwspec *fwspec = qcom_domain->fwspec;
	unsigned i;

	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);

		iommu_writel(ctx, ARM_SMMU_CB_S1_TLBIASID, ctx->asid);
	}

	qcom_iommu_tlb_sync(cookie);
}

static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					    size_t granule, bool leaf, void *cookie)
{
	struct qcom_iommu_domain *qcom_domain = cookie;
	struct iommu_fwspec *fwspec = qcom_domain->fwspec;
	unsigned i, reg;

	reg = leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
		size_t s = size;

		iova = (iova >> 12) << 12;
		iova |= ctx->asid;
		do {
			iommu_writel(ctx, reg, iova);
			iova += granule;
		} while (s -= granule);
	}
}

static void qcom_iommu_tlb_flush_walk(unsigned long iova, size_t size,
				      size_t granule, void *cookie)
{
	qcom_iommu_tlb_inv_range_nosync(iova, size, granule, false, cookie);
	qcom_iommu_tlb_sync(cookie);
}

static void qcom_iommu_tlb_flush_leaf(unsigned long iova, size_t size,
				      size_t granule, void *cookie)
{
	qcom_iommu_tlb_inv_range_nosync(iova, size, granule, true, cookie);
	qcom_iommu_tlb_sync(cookie);
}

static void qcom_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
				    unsigned long iova, size_t granule,
				    void *cookie)
{
	qcom_iommu_tlb_inv_range_nosync(iova, granule, granule, true, cookie);
}

static const struct iommu_flush_ops qcom_flush_ops = {
	.tlb_flush_all	= qcom_iommu_tlb_inv_context,
	.tlb_flush_walk = qcom_iommu_tlb_flush_walk,
	.tlb_flush_leaf = qcom_iommu_tlb_flush_leaf,
	.tlb_add_page	= qcom_iommu_tlb_add_page,
};

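/*
 * Context fault handler. Faults are first offered to any handler set via
 * report_iommu_fault(); if nobody claims the fault, a ratelimited
 * diagnostic is printed and the stalled transaction is terminated.
 */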
static irqreturn_t qcom_iommu_fault(int irq, void *dev)
{
	struct qcom_iommu_ctx *ctx = dev;
	u32 fsr, fsynr;
	u64 iova;

	fsr = iommu_readl(ctx, ARM_SMMU_CB_FSR);

	if (!(fsr & ARM_SMMU_FSR_FAULT))
		return IRQ_NONE;

	fsynr = iommu_readl(ctx, ARM_SMMU_CB_FSYNR0);
	iova = iommu_readq(ctx, ARM_SMMU_CB_FAR);

	if (!report_iommu_fault(ctx->domain, ctx->dev, iova, 0)) {
		dev_err_ratelimited(ctx->dev,
				    "Unhandled context fault: fsr=0x%x, iova=0x%016llx, fsynr=0x%x, cb=%d\n",
				    fsr, iova, fsynr, ctx->asid);
	}

	iommu_writel(ctx, ARM_SMMU_CB_FSR, fsr);
	iommu_writel(ctx, ARM_SMMU_CB_RESUME, ARM_SMMU_RESUME_TERMINATE);

	return IRQ_HANDLED;
}

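/*
 * Finalize a domain against a specific IOMMU instance: allocate the
 * io-pgtable, then program every context bank listed in the device's
 * fwspec (secure init via SCM on first use, then TTBRs, TCR, MAIRs,
 * and finally SCTLR to enable translation).
 */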
static int qcom_iommu_init_domain(struct iommu_domain *domain,
				  struct qcom_iommu_dev *qcom_iommu,
				  struct device *dev)
{
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	int i, ret = 0;
	u32 reg;

	mutex_lock(&qcom_domain->init_mutex);
	if (qcom_domain->iommu)
		goto out_unlock;

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= qcom_iommu_ops.pgsize_bitmap,
		.ias		= 32,
		.oas		= 40,
		.tlb		= &qcom_flush_ops,
		.iommu_dev	= qcom_iommu->dev,
	};

	qcom_domain->iommu = qcom_iommu;
	qcom_domain->fwspec = fwspec;

	pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, qcom_domain);
	if (!pgtbl_ops) {
		dev_err(qcom_iommu->dev, "failed to allocate pagetable ops\n");
		ret = -ENOMEM;
		goto out_clear_iommu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1ULL << pgtbl_cfg.ias) - 1;
	domain->geometry.force_aperture = true;

	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);

		if (!ctx->secure_init) {
			ret = qcom_scm_restore_sec_cfg(qcom_iommu->sec_id, ctx->asid);
			if (ret) {
				dev_err(qcom_iommu->dev, "secure init failed: %d\n", ret);
				goto out_clear_iommu;
			}
			ctx->secure_init = true;
		}

		/* TTBRs */
		iommu_writeq(ctx, ARM_SMMU_CB_TTBR0,
			     pgtbl_cfg.arm_lpae_s1_cfg.ttbr |
			     FIELD_PREP(ARM_SMMU_TTBRn_ASID, ctx->asid));
		iommu_writeq(ctx, ARM_SMMU_CB_TTBR1, 0);

		/* TCR */
		iommu_writel(ctx, ARM_SMMU_CB_TCR2,
			     arm_smmu_lpae_tcr2(&pgtbl_cfg));
		iommu_writel(ctx, ARM_SMMU_CB_TCR,
			     arm_smmu_lpae_tcr(&pgtbl_cfg) | ARM_SMMU_TCR_EAE);

		/* MAIRs (stage-1 only) */
		iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR0,
			     pgtbl_cfg.arm_lpae_s1_cfg.mair);
		iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR1,
			     pgtbl_cfg.arm_lpae_s1_cfg.mair >> 32);

		/* SCTLR */
		reg = ARM_SMMU_SCTLR_CFIE | ARM_SMMU_SCTLR_CFRE |
		      ARM_SMMU_SCTLR_AFE | ARM_SMMU_SCTLR_TRE |
		      ARM_SMMU_SCTLR_M | ARM_SMMU_SCTLR_S1_ASIDPNE |
		      ARM_SMMU_SCTLR_CFCFG;

		if (IS_ENABLED(CONFIG_BIG_ENDIAN))
			reg |= ARM_SMMU_SCTLR_E;

		iommu_writel(ctx, ARM_SMMU_CB_SCTLR, reg);

		ctx->domain = domain;
	}

	mutex_unlock(&qcom_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	qcom_domain->pgtbl_ops = pgtbl_ops;

	return 0;

out_clear_iommu:
	qcom_domain->iommu = NULL;
out_unlock:
	mutex_unlock(&qcom_domain->init_mutex);
	return ret;
}

static struct iommu_domain *qcom_iommu_domain_alloc(unsigned type)
{
	struct qcom_iommu_domain *qcom_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	qcom_domain = kzalloc(sizeof(*qcom_domain), GFP_KERNEL);
	if (!qcom_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&qcom_domain->domain)) {
		kfree(qcom_domain);
		return NULL;
	}

	mutex_init(&qcom_domain->init_mutex);
	spin_lock_init(&qcom_domain->pgtbl_lock);

	return &qcom_domain->domain;
}

static void qcom_iommu_domain_free(struct iommu_domain *domain)
{
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);

	iommu_put_dma_cookie(domain);

	if (qcom_domain->iommu) {
		/*
		 * NOTE: unmap can be called after client device is powered
		 * off, for example, with GPUs or anything involving dma-buf.
		 * So we cannot rely on the device_link. Make sure the IOMMU
		 * is on to avoid unclocked accesses in the TLB inv path:
		 */
		pm_runtime_get_sync(qcom_domain->iommu->dev);
		free_io_pgtable_ops(qcom_domain->pgtbl_ops);
		pm_runtime_put_sync(qcom_domain->iommu->dev);
	}

	kfree(qcom_domain);
}

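/*
 * Attach: finalize the domain against this IOMMU while it is powered,
 * then sanity-check that the caller is not trying to span two different
 * IOMMU instances with a single domain.
 */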
static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	int ret;

	if (!qcom_iommu) {
		dev_err(dev, "cannot attach to IOMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/* Ensure that the domain is finalized */
	pm_runtime_get_sync(qcom_iommu->dev);
	ret = qcom_iommu_init_domain(domain, qcom_iommu, dev);
	pm_runtime_put_sync(qcom_iommu->dev);
	if (ret < 0)
		return ret;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different IOMMUs.
	 */
	if (qcom_domain->iommu != qcom_iommu) {
		dev_err(dev, "cannot attach to IOMMU %s while already attached to domain on IOMMU %s\n",
			dev_name(qcom_domain->iommu->dev),
			dev_name(qcom_iommu->dev));
		return -EINVAL;
	}

	return 0;
}

static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
	unsigned i;

	if (WARN_ON(!qcom_domain->iommu))
		return;

	pm_runtime_get_sync(qcom_iommu->dev);
	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);

		/* Disable the context bank: */
		iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0);

		ctx->domain = NULL;
	}
	pm_runtime_put_sync(qcom_iommu->dev);
}

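/*
 * Map and unmap both take the page table spinlock and go through the
 * io-pgtable ops. Only unmap needs the IOMMU powered: map only updates
 * page tables in memory, while unmap triggers TLB invalidation through
 * qcom_flush_ops.
 */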
static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	int ret;
	unsigned long flags;
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
	return ret;
}

static size_t qcom_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t size, struct iommu_iotlb_gather *gather)
{
	size_t ret;
	unsigned long flags;
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops;

	if (!ops)
		return 0;

	/*
	 * NOTE: unmap can be called after client device is powered off,
	 * for example, with GPUs or anything involving dma-buf. So we
	 * cannot rely on the device_link. Make sure the IOMMU is on to
	 * avoid unclocked accesses in the TLB inv path:
	 */
	pm_runtime_get_sync(qcom_domain->iommu->dev);
	spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size, gather);
	spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
	pm_runtime_put_sync(qcom_domain->iommu->dev);

	return ret;
}

static void qcom_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct io_pgtable *pgtable = container_of(qcom_domain->pgtbl_ops,
						  struct io_pgtable, ops);
	if (!qcom_domain->pgtbl_ops)
		return;

	pm_runtime_get_sync(qcom_domain->iommu->dev);
	qcom_iommu_tlb_sync(pgtable->cookie);
	pm_runtime_put_sync(qcom_domain->iommu->dev);
}

static void qcom_iommu_iotlb_sync(struct iommu_domain *domain,
				  struct iommu_iotlb_gather *gather)
{
	qcom_iommu_flush_iotlb_all(domain);
}

static phys_addr_t qcom_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
	ret = ops->iova_to_phys(ops, iova);
	spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);

	return ret;
}

static bool qcom_iommu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
		return true;
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static struct iommu_device *qcom_iommu_probe_device(struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
	struct device_link *link;

	if (!qcom_iommu)
		return ERR_PTR(-ENODEV);

	/*
	 * Establish the link between iommu and master, so that the
	 * iommu gets runtime enabled/disabled as per the master's
	 * needs.
	 */
	link = device_link_add(dev, qcom_iommu->dev, DL_FLAG_PM_RUNTIME);
	if (!link) {
		dev_err(qcom_iommu->dev, "Unable to create device link between %s and %s\n",
			dev_name(qcom_iommu->dev), dev_name(dev));
		return ERR_PTR(-ENODEV);
	}

	return &qcom_iommu->iommu;
}

static void qcom_iommu_release_device(struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);

	if (!qcom_iommu)
		return;

	iommu_fwspec_free(dev);
}

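/*
 * Translate an "iommus" phandle argument into a fwspec id. Each master
 * passes a single cell: the ASID of the context bank it uses. An
 * illustrative wiring (not taken from any real dts) might look like:
 *
 *	apps_iommu: iommu@1e20000 {
 *		compatible = "qcom,msm-iommu-v1";
 *		...
 *		iommu-ctx@3000 {
 *			compatible = "qcom,msm-iommu-v1-ns";
 *			reg = <0x3000 0x1000>;	// offset 0x3000 -> asid 3
 *		};
 *	};
 *
 *	master {
 *		iommus = <&apps_iommu 3>;
 *	};
 */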
static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	struct qcom_iommu_dev *qcom_iommu;
	struct platform_device *iommu_pdev;
	unsigned asid = args->args[0];

	if (args->args_count != 1) {
		dev_err(dev, "incorrect number of iommu params found for %s (found %d, expected 1)\n",
			args->np->full_name, args->args_count);
		return -EINVAL;
	}

	iommu_pdev = of_find_device_by_node(args->np);
	if (WARN_ON(!iommu_pdev))
		return -EINVAL;

	qcom_iommu = platform_get_drvdata(iommu_pdev);

	/*
	 * Make sure the asid specified in dt is valid, so we don't have
	 * to sanity check this elsewhere, since 'asid - 1' is used to
	 * index into qcom_iommu->ctxs:
	 */
	if (WARN_ON(asid < 1) ||
	    WARN_ON(asid > qcom_iommu->num_ctxs))
		return -EINVAL;

	if (!dev_iommu_priv_get(dev)) {
		dev_iommu_priv_set(dev, qcom_iommu);
	} else {
		/*
		 * Make sure the device's 'iommus' dt node isn't referring
		 * to multiple different iommu devices. Multiple context
		 * banks are ok, but multiple devices are not:
		 */
		if (WARN_ON(qcom_iommu != dev_iommu_priv_get(dev)))
			return -EINVAL;
	}

	return iommu_fwspec_add_ids(dev, &asid, 1);
}

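/*
 * The iommu_ops vector wired into the IOMMU core; pgsize_bitmap
 * advertises the page (and block) sizes the domains support.
 */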
static const struct iommu_ops qcom_iommu_ops = {
	.capable	= qcom_iommu_capable,
	.domain_alloc	= qcom_iommu_domain_alloc,
	.domain_free	= qcom_iommu_domain_free,
	.attach_dev	= qcom_iommu_attach_dev,
	.detach_dev	= qcom_iommu_detach_dev,
	.map		= qcom_iommu_map,
	.unmap		= qcom_iommu_unmap,
	.flush_iotlb_all = qcom_iommu_flush_iotlb_all,
	.iotlb_sync	= qcom_iommu_iotlb_sync,
	.iova_to_phys	= qcom_iommu_iova_to_phys,
	.probe_device	= qcom_iommu_probe_device,
	.release_device	= qcom_iommu_release_device,
	.device_group	= generic_device_group,
	.of_xlate	= qcom_iommu_of_xlate,
	.pgsize_bitmap	= SZ_4K | SZ_64K | SZ_1M | SZ_16M,
};

static int qcom_iommu_enable_clocks(struct qcom_iommu_dev *qcom_iommu)
{
	int ret;

	ret = clk_prepare_enable(qcom_iommu->iface_clk);
	if (ret) {
		dev_err(qcom_iommu->dev, "Couldn't enable iface_clk\n");
		return ret;
	}

	ret = clk_prepare_enable(qcom_iommu->bus_clk);
	if (ret) {
		dev_err(qcom_iommu->dev, "Couldn't enable bus_clk\n");
		clk_disable_unprepare(qcom_iommu->iface_clk);
		return ret;
	}

	return 0;
}

static void qcom_iommu_disable_clocks(struct qcom_iommu_dev *qcom_iommu)
{
	clk_disable_unprepare(qcom_iommu->bus_clk);
	clk_disable_unprepare(qcom_iommu->iface_clk);
}

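/*
 * If any context bank is secure, the secure world owns the page table
 * backing store. Ask SCM how much memory it needs, hand it over once,
 * and remember that with a function-local static so this only happens
 * for the first secure IOMMU probed.
 */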
static int qcom_iommu_sec_ptbl_init(struct device *dev)
{
	size_t psize = 0;
	unsigned int spare = 0;
	void *cpu_addr;
	dma_addr_t paddr;
	unsigned long attrs;
	static bool allocated = false;
	int ret;

	if (allocated)
		return 0;

	ret = qcom_scm_iommu_secure_ptbl_size(spare, &psize);
	if (ret) {
		dev_err(dev, "failed to get iommu secure pgtable size (%d)\n",
			ret);
		return ret;
	}

	dev_info(dev, "iommu sec: pgtable size: %zu\n", psize);

	attrs = DMA_ATTR_NO_KERNEL_MAPPING;

	cpu_addr = dma_alloc_attrs(dev, psize, &paddr, GFP_KERNEL, attrs);
	if (!cpu_addr) {
		dev_err(dev, "failed to allocate %zu bytes for pgtable\n",
			psize);
		return -ENOMEM;
	}

	ret = qcom_scm_iommu_secure_ptbl_init(paddr, psize, spare);
	if (ret) {
		dev_err(dev, "failed to init iommu pgtable (%d)\n", ret);
		goto free_mem;
	}

	allocated = true;
	return 0;

free_mem:
	dma_free_attrs(dev, psize, cpu_addr, paddr, attrs);
	return ret;
}

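/*
 * The ASID is not a dedicated property; it is derived from the context
 * bank's offset within the IOMMU: reg 0x1000 -> asid 1, 0x2000 -> asid 2,
 * and so on.
 */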
static int get_asid(const struct device_node *np)
{
	u32 reg;

	/*
	 * Read the "reg" property directly to get the relative address
	 * of the context bank, and calculate the asid from that:
	 */
	if (of_property_read_u32_index(np, "reg", 0, &reg))
		return -ENODEV;

	return reg / 0x1000;	/* context banks are 0x1000 apart */
}

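/*
 * Probe one context bank: map its registers, install the fault handler
 * (clearing any FSR bits the bootloader left behind first), and register
 * the bank in the parent's ctxs[] table under its ASID.
 */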
static int qcom_iommu_ctx_probe(struct platform_device *pdev)
{
	struct qcom_iommu_ctx *ctx;
	struct device *dev = &pdev->dev;
	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev->parent);
	struct resource *res;
	int ret, irq;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->dev = dev;
	platform_set_drvdata(pdev, ctx);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ctx->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(ctx->base))
		return PTR_ERR(ctx->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENODEV;

	/*
	 * Clear IRQs before registering fault handler, just in case the
	 * boot-loader left us a surprise:
	 */
	iommu_writel(ctx, ARM_SMMU_CB_FSR, iommu_readl(ctx, ARM_SMMU_CB_FSR));

	ret = devm_request_irq(dev, irq,
			       qcom_iommu_fault,
			       IRQF_SHARED,
			       "qcom-iommu-fault",
			       ctx);
	if (ret) {
		dev_err(dev, "failed to request IRQ %u\n", irq);
		return ret;
	}

	ret = get_asid(dev->of_node);
	if (ret < 0) {
		dev_err(dev, "missing reg property\n");
		return ret;
	}

	ctx->asid = ret;

	dev_dbg(dev, "found asid %u\n", ctx->asid);

	qcom_iommu->ctxs[ctx->asid - 1] = ctx;

	return 0;
}

static int qcom_iommu_ctx_remove(struct platform_device *pdev)
{
	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(pdev->dev.parent);
	struct qcom_iommu_ctx *ctx = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	qcom_iommu->ctxs[ctx->asid - 1] = NULL;

	return 0;
}

static const struct of_device_id ctx_of_match[] = {
	{ .compatible = "qcom,msm-iommu-v1-ns" },
	{ .compatible = "qcom,msm-iommu-v1-sec" },
	{ /* sentinel */ }
};

static struct platform_driver qcom_iommu_ctx_driver = {
	.driver	= {
		.name		= "qcom-iommu-ctx",
		.of_match_table	= of_match_ptr(ctx_of_match),
	},
	.probe	= qcom_iommu_ctx_probe,
	.remove	= qcom_iommu_ctx_remove,
};

static bool qcom_iommu_has_secure_context(struct qcom_iommu_dev *qcom_iommu)
{
	struct device_node *child;

	for_each_child_of_node(qcom_iommu->dev->of_node, child)
		if (of_device_is_compatible(child, "qcom,msm-iommu-v1-sec"))
			return true;

	return false;
}

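/*
 * Probe the top-level IOMMU: size the ctxs[] table from the max child
 * ASID, grab clocks and the secure id, hand the secure page table over
 * if needed, populate the child context bank devices, and register with
 * the IOMMU core.
 */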
static int qcom_iommu_device_probe(struct platform_device *pdev)
{
	struct device_node *child;
	struct qcom_iommu_dev *qcom_iommu;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int ret, max_asid = 0;

	/*
	 * Find the max asid (which is 1:1 to ctx bank idx), so we know how
	 * many child ctx devices we have:
	 */
	for_each_child_of_node(dev->of_node, child)
		max_asid = max(max_asid, get_asid(child));

	qcom_iommu = devm_kzalloc(dev, struct_size(qcom_iommu, ctxs, max_asid),
				  GFP_KERNEL);
	if (!qcom_iommu)
		return -ENOMEM;
	qcom_iommu->num_ctxs = max_asid;
	qcom_iommu->dev = dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res) {
		qcom_iommu->local_base = devm_ioremap_resource(dev, res);
		if (IS_ERR(qcom_iommu->local_base))
			return PTR_ERR(qcom_iommu->local_base);
	}

	qcom_iommu->iface_clk = devm_clk_get(dev, "iface");
	if (IS_ERR(qcom_iommu->iface_clk)) {
		dev_err(dev, "failed to get iface clock\n");
		return PTR_ERR(qcom_iommu->iface_clk);
	}

	qcom_iommu->bus_clk = devm_clk_get(dev, "bus");
	if (IS_ERR(qcom_iommu->bus_clk)) {
		dev_err(dev, "failed to get bus clock\n");
		return PTR_ERR(qcom_iommu->bus_clk);
	}

	if (of_property_read_u32(dev->of_node, "qcom,iommu-secure-id",
				 &qcom_iommu->sec_id)) {
		dev_err(dev, "missing qcom,iommu-secure-id property\n");
		return -ENODEV;
	}

	if (qcom_iommu_has_secure_context(qcom_iommu)) {
		ret = qcom_iommu_sec_ptbl_init(dev);
		if (ret) {
			dev_err(dev, "cannot init secure pg table(%d)\n", ret);
			return ret;
		}
	}

	platform_set_drvdata(pdev, qcom_iommu);

	pm_runtime_enable(dev);

	/* register context bank devices, which are child nodes: */
	ret = devm_of_platform_populate(dev);
	if (ret) {
		dev_err(dev, "Failed to populate iommu contexts\n");
		return ret;
	}

	ret = iommu_device_sysfs_add(&qcom_iommu->iommu, dev, NULL,
				     dev_name(dev));
	if (ret) {
		dev_err(dev, "Failed to register iommu in sysfs\n");
		return ret;
	}

	iommu_device_set_ops(&qcom_iommu->iommu, &qcom_iommu_ops);
	iommu_device_set_fwnode(&qcom_iommu->iommu, dev->fwnode);

	ret = iommu_device_register(&qcom_iommu->iommu);
	if (ret) {
		dev_err(dev, "Failed to register iommu\n");
		return ret;
	}

	bus_set_iommu(&platform_bus_type, &qcom_iommu_ops);

	if (qcom_iommu->local_base) {
		pm_runtime_get_sync(dev);
		writel_relaxed(0xffffffff, qcom_iommu->local_base + SMMU_INTR_SEL_NS);
		pm_runtime_put_sync(dev);
	}

	return 0;
}

static int qcom_iommu_device_remove(struct platform_device *pdev)
{
	struct qcom_iommu_dev *qcom_iommu = platform_get_drvdata(pdev);

	bus_set_iommu(&platform_bus_type, NULL);

	pm_runtime_force_suspend(&pdev->dev);
	platform_set_drvdata(pdev, NULL);
	iommu_device_sysfs_remove(&qcom_iommu->iommu);
	iommu_device_unregister(&qcom_iommu->iommu);

	return 0;
}

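/*
 * Runtime PM just gates the interface and bus clocks; system sleep
 * reuses the same hooks via pm_runtime_force_suspend/_resume.
 */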
static int __maybe_unused qcom_iommu_resume(struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);

	return qcom_iommu_enable_clocks(qcom_iommu);
}

static int __maybe_unused qcom_iommu_suspend(struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);

	qcom_iommu_disable_clocks(qcom_iommu);

	return 0;
}

static const struct dev_pm_ops qcom_iommu_pm_ops = {
	SET_RUNTIME_PM_OPS(qcom_iommu_suspend, qcom_iommu_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

static const struct of_device_id qcom_iommu_of_match[] = {
	{ .compatible = "qcom,msm-iommu-v1" },
	{ /* sentinel */ }
};

static struct platform_driver qcom_iommu_driver = {
	.driver	= {
		.name		= "qcom-iommu",
		.of_match_table	= of_match_ptr(qcom_iommu_of_match),
		.pm		= &qcom_iommu_pm_ops,
	},
	.probe	= qcom_iommu_device_probe,
	.remove	= qcom_iommu_device_remove,
};

static int __init qcom_iommu_init(void)
{
	int ret;

	ret = platform_driver_register(&qcom_iommu_ctx_driver);
	if (ret)
		return ret;

	ret = platform_driver_register(&qcom_iommu_driver);
	if (ret)
		platform_driver_unregister(&qcom_iommu_ctx_driver);

	return ret;
}
device_initcall(qcom_iommu_init);