// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for QCOM secure IOMMUs. Somewhat based on arm-smmu.c
 *
 * Copyright (C) 2013 ARM Limited
 * Copyright (C) 2017 Red Hat
 */

#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/kconfig.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/qcom_scm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "arm-smmu.h"

#define SMMU_INTR_SEL_NS	0x2000

struct qcom_iommu_ctx;

struct qcom_iommu_dev {
	/* IOMMU core code handle */
	struct iommu_device	 iommu;
	struct device		*dev;
	struct clk		*iface_clk;
	struct clk		*bus_clk;
	void __iomem		*local_base;
	u32			 sec_id;
	u8			 num_ctxs;
	struct qcom_iommu_ctx	*ctxs[];   /* indexed by asid-1 */
};

struct qcom_iommu_ctx {
	struct device		*dev;
	void __iomem		*base;
	bool			 secure_init;
	u8			 asid;      /* asid and ctx bank # are 1:1 */
	struct iommu_domain	*domain;
};

struct qcom_iommu_domain {
	struct io_pgtable_ops	*pgtbl_ops;
	spinlock_t		 pgtbl_lock;
	struct mutex		 init_mutex; /* Protects iommu pointer */
	struct iommu_domain	 domain;
	struct qcom_iommu_dev	*iommu;
};

static struct qcom_iommu_domain *to_qcom_iommu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct qcom_iommu_domain, domain);
}

static const struct iommu_ops qcom_iommu_ops;

static struct qcom_iommu_dev * to_iommu(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (!fwspec || fwspec->ops != &qcom_iommu_ops)
		return NULL;

	return dev_iommu_priv_get(dev);
}

static struct qcom_iommu_ctx * to_ctx(struct device *dev, unsigned asid)
{
	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
	if (!qcom_iommu)
		return NULL;
	return qcom_iommu->ctxs[asid - 1];
}

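/*
 * Relaxed MMIO accessors for the per-context-bank register space; the
 * register offsets come from arm-smmu.h.
 */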
static inline void
iommu_writel(struct qcom_iommu_ctx *ctx, unsigned reg, u32 val)
{
	writel_relaxed(val, ctx->base + reg);
}

static inline void
iommu_writeq(struct qcom_iommu_ctx *ctx, unsigned reg, u64 val)
{
	writeq_relaxed(val, ctx->base + reg);
}

static inline u32
iommu_readl(struct qcom_iommu_ctx *ctx, unsigned reg)
{
	return readl_relaxed(ctx->base + reg);
}

static inline u64
iommu_readq(struct qcom_iommu_ctx *ctx, unsigned reg)
{
	return readq_relaxed(ctx->base + reg);
}

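/*
 * Issue a TLBSYNC on each context bank used by the client device and poll
 * TLBSTATUS until the sync completes (timing out after 5 seconds per bank).
 */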
static void qcom_iommu_tlb_sync(void *cookie)
{
	struct iommu_fwspec *fwspec;
	struct device *dev = cookie;
	unsigned i;

	fwspec = dev_iommu_fwspec_get(dev);

	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
		unsigned int val, ret;

		iommu_writel(ctx, ARM_SMMU_CB_TLBSYNC, 0);

		ret = readl_poll_timeout(ctx->base + ARM_SMMU_CB_TLBSTATUS, val,
					 (val & 0x1) == 0, 0, 5000000);
		if (ret)
			dev_err(ctx->dev, "timeout waiting for TLB SYNC\n");
	}
}

static void qcom_iommu_tlb_inv_context(void *cookie)
{
	struct device *dev = cookie;
	struct iommu_fwspec *fwspec;
	unsigned i;

	fwspec = dev_iommu_fwspec_get(dev);

	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
		iommu_writel(ctx, ARM_SMMU_CB_S1_TLBIASID, ctx->asid);
	}

	qcom_iommu_tlb_sync(cookie);
}

static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					    size_t granule, bool leaf, void *cookie)
{
	struct device *dev = cookie;
	struct iommu_fwspec *fwspec;
	unsigned i, reg;

	reg = leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

	fwspec = dev_iommu_fwspec_get(dev);

	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
		size_t s = size;

		iova = (iova >> 12) << 12;
		iova |= ctx->asid;
		do {
			iommu_writel(ctx, reg, iova);
			iova += granule;
		} while (s -= granule);
	}
}

static void qcom_iommu_tlb_flush_walk(unsigned long iova, size_t size,
				      size_t granule, void *cookie)
{
	qcom_iommu_tlb_inv_range_nosync(iova, size, granule, false, cookie);
	qcom_iommu_tlb_sync(cookie);
}

static void qcom_iommu_tlb_flush_leaf(unsigned long iova, size_t size,
				      size_t granule, void *cookie)
{
	qcom_iommu_tlb_inv_range_nosync(iova, size, granule, true, cookie);
	qcom_iommu_tlb_sync(cookie);
}

static void qcom_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
				    unsigned long iova, size_t granule,
				    void *cookie)
{
	qcom_iommu_tlb_inv_range_nosync(iova, granule, granule, true, cookie);
}

static const struct iommu_flush_ops qcom_flush_ops = {
	.tlb_flush_all	= qcom_iommu_tlb_inv_context,
	.tlb_flush_walk	= qcom_iommu_tlb_flush_walk,
	.tlb_flush_leaf	= qcom_iommu_tlb_flush_leaf,
	.tlb_add_page	= qcom_iommu_tlb_add_page,
};

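/*
 * Context fault handler: decode FSR/FSYNR0/FAR, give the domain's fault
 * handler (if any) a chance to service the fault, then clear it and
 * terminate the stalled transaction.
 */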
static irqreturn_t qcom_iommu_fault(int irq, void *dev)
{
	struct qcom_iommu_ctx *ctx = dev;
	u32 fsr, fsynr;
	u64 iova;

	fsr = iommu_readl(ctx, ARM_SMMU_CB_FSR);

	if (!(fsr & ARM_SMMU_FSR_FAULT))
		return IRQ_NONE;

	fsynr = iommu_readl(ctx, ARM_SMMU_CB_FSYNR0);
	iova = iommu_readq(ctx, ARM_SMMU_CB_FAR);

	if (!report_iommu_fault(ctx->domain, ctx->dev, iova, 0)) {
		dev_err_ratelimited(ctx->dev,
				    "Unhandled context fault: fsr=0x%x, "
				    "iova=0x%016llx, fsynr=0x%x, cb=%d\n",
				    fsr, iova, fsynr, ctx->asid);
	}

	iommu_writel(ctx, ARM_SMMU_CB_FSR, fsr);
	iommu_writel(ctx, ARM_SMMU_CB_RESUME, ARM_SMMU_RESUME_TERMINATE);

	return IRQ_HANDLED;
}

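/*
 * Finalize a domain on first attach: allocate the ARM 32-bit LPAE stage-1
 * page table, then program TTBRs, TCR, MAIRs and SCTLR in every context
 * bank used by the client device. Secure contexts are restored via SCM
 * before first use.
 */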
static int qcom_iommu_init_domain(struct iommu_domain *domain,
				  struct qcom_iommu_dev *qcom_iommu,
				  struct device *dev)
{
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	int i, ret = 0;
	u32 reg;

	mutex_lock(&qcom_domain->init_mutex);
	if (qcom_domain->iommu)
		goto out_unlock;

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= qcom_iommu_ops.pgsize_bitmap,
		.ias		= 32,
		.oas		= 40,
		.tlb		= &qcom_flush_ops,
		.iommu_dev	= qcom_iommu->dev,
	};

	qcom_domain->iommu = qcom_iommu;
	pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, dev);
	if (!pgtbl_ops) {
		dev_err(qcom_iommu->dev, "failed to allocate pagetable ops\n");
		ret = -ENOMEM;
		goto out_clear_iommu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1ULL << pgtbl_cfg.ias) - 1;
	domain->geometry.force_aperture = true;

	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);

		if (!ctx->secure_init) {
			ret = qcom_scm_restore_sec_cfg(qcom_iommu->sec_id, ctx->asid);
			if (ret) {
				dev_err(qcom_iommu->dev, "secure init failed: %d\n", ret);
				goto out_clear_iommu;
			}
			ctx->secure_init = true;
		}

		/* TTBRs */
		iommu_writeq(ctx, ARM_SMMU_CB_TTBR0,
			     pgtbl_cfg.arm_lpae_s1_cfg.ttbr |
			     FIELD_PREP(ARM_SMMU_TTBRn_ASID, ctx->asid));
		iommu_writeq(ctx, ARM_SMMU_CB_TTBR1, 0);

		/* TCR */
		iommu_writel(ctx, ARM_SMMU_CB_TCR2,
			     arm_smmu_lpae_tcr2(&pgtbl_cfg));
		iommu_writel(ctx, ARM_SMMU_CB_TCR,
			     arm_smmu_lpae_tcr(&pgtbl_cfg) | ARM_SMMU_TCR_EAE);

		/* MAIRs (stage-1 only) */
		iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR0,
			     pgtbl_cfg.arm_lpae_s1_cfg.mair);
		iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR1,
			     pgtbl_cfg.arm_lpae_s1_cfg.mair >> 32);

		/* SCTLR */
		reg = ARM_SMMU_SCTLR_CFIE | ARM_SMMU_SCTLR_CFRE |
		      ARM_SMMU_SCTLR_AFE | ARM_SMMU_SCTLR_TRE |
		      ARM_SMMU_SCTLR_M | ARM_SMMU_SCTLR_S1_ASIDPNE |
		      ARM_SMMU_SCTLR_CFCFG;

		if (IS_ENABLED(CONFIG_BIG_ENDIAN))
			reg |= ARM_SMMU_SCTLR_E;

		iommu_writel(ctx, ARM_SMMU_CB_SCTLR, reg);

		ctx->domain = domain;
	}

	mutex_unlock(&qcom_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	qcom_domain->pgtbl_ops = pgtbl_ops;

	return 0;

out_clear_iommu:
	qcom_domain->iommu = NULL;
out_unlock:
	mutex_unlock(&qcom_domain->init_mutex);
	return ret;
}

static struct iommu_domain *qcom_iommu_domain_alloc(unsigned type)
{
	struct qcom_iommu_domain *qcom_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	qcom_domain = kzalloc(sizeof(*qcom_domain), GFP_KERNEL);
	if (!qcom_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&qcom_domain->domain)) {
		kfree(qcom_domain);
		return NULL;
	}

	mutex_init(&qcom_domain->init_mutex);
	spin_lock_init(&qcom_domain->pgtbl_lock);

	return &qcom_domain->domain;
}

static void qcom_iommu_domain_free(struct iommu_domain *domain)
{
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);

	iommu_put_dma_cookie(domain);

	if (qcom_domain->iommu) {
		/*
		 * NOTE: unmap can be called after client device is powered
		 * off, for example, with GPUs or anything involving dma-buf.
		 * So we cannot rely on the device_link. Make sure the IOMMU
		 * is on to avoid unclocked accesses in the TLB inv path:
		 */
		pm_runtime_get_sync(qcom_domain->iommu->dev);
		free_io_pgtable_ops(qcom_domain->pgtbl_ops);
		pm_runtime_put_sync(qcom_domain->iommu->dev);
	}

	kfree(qcom_domain);
}

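/*
 * Attach a client device to a domain. The IOMMU is powered up for the
 * duration of domain initialization; a domain may only ever span a single
 * qcom_iommu_dev instance.
 */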
static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	int ret;

	if (!qcom_iommu) {
		dev_err(dev, "cannot attach to IOMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/* Ensure that the domain is finalized */
	pm_runtime_get_sync(qcom_iommu->dev);
	ret = qcom_iommu_init_domain(domain, qcom_iommu, dev);
	pm_runtime_put_sync(qcom_iommu->dev);
	if (ret < 0)
		return ret;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different IOMMUs.
	 */
	if (qcom_domain->iommu != qcom_iommu) {
		dev_err(dev, "cannot attach to IOMMU %s while already "
			"attached to domain on IOMMU %s\n",
			dev_name(qcom_domain->iommu->dev),
			dev_name(qcom_iommu->dev));
		return -EINVAL;
	}

	return 0;
}

static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
	unsigned i;

	if (WARN_ON(!qcom_domain->iommu))
		return;

	pm_runtime_get_sync(qcom_iommu->dev);
	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);

		/* Disable the context bank: */
		iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0);

		ctx->domain = NULL;
	}
	pm_runtime_put_sync(qcom_iommu->dev);
}

static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	int ret;
	unsigned long flags;
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
	return ret;
}

static size_t qcom_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t size, struct iommu_iotlb_gather *gather)
{
	size_t ret;
	unsigned long flags;
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops;

	if (!ops)
		return 0;

	/* NOTE: unmap can be called after client device is powered off,
	 * for example, with GPUs or anything involving dma-buf. So we
	 * cannot rely on the device_link. Make sure the IOMMU is on to
	 * avoid unclocked accesses in the TLB inv path:
	 */
	pm_runtime_get_sync(qcom_domain->iommu->dev);
	spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size, gather);
	spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
	pm_runtime_put_sync(qcom_domain->iommu->dev);

	return ret;
}

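/*
 * IOTLB flush/sync entry points called by the IOMMU core; both resolve to
 * a full TLB sync, with the IOMMU powered up around the register accesses.
 */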
static void qcom_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct io_pgtable *pgtable = container_of(qcom_domain->pgtbl_ops,
						  struct io_pgtable, ops);
	if (!qcom_domain->pgtbl_ops)
		return;

	pm_runtime_get_sync(qcom_domain->iommu->dev);
	qcom_iommu_tlb_sync(pgtable->cookie);
	pm_runtime_put_sync(qcom_domain->iommu->dev);
}

static void qcom_iommu_iotlb_sync(struct iommu_domain *domain,
				  struct iommu_iotlb_gather *gather)
{
	qcom_iommu_flush_iotlb_all(domain);
}

static phys_addr_t qcom_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
	ret = ops->iova_to_phys(ops, iova);
	spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);

	return ret;
}

static bool qcom_iommu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
		return true;
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static int qcom_iommu_add_device(struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
	struct iommu_group *group;
	struct device_link *link;

	if (!qcom_iommu)
		return -ENODEV;

	/*
	 * Establish the link between iommu and master, so that the
	 * iommu gets runtime enabled/disabled as per the master's
	 * needs.
	 */
	link = device_link_add(dev, qcom_iommu->dev, DL_FLAG_PM_RUNTIME);
	if (!link) {
		dev_err(qcom_iommu->dev, "Unable to create device link between %s and %s\n",
			dev_name(qcom_iommu->dev), dev_name(dev));
		return -ENODEV;
	}

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	iommu_device_link(&qcom_iommu->iommu, dev);

	return 0;
}

static void qcom_iommu_remove_device(struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);

	if (!qcom_iommu)
		return;

	iommu_device_unlink(&qcom_iommu->iommu, dev);
	iommu_group_remove_device(dev);
	iommu_fwspec_free(dev);
}

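/*
 * Translate a single-cell "iommus" DT specifier (the asid) into a fwspec
 * id, validating that the asid is in range and that all of a device's
 * contexts belong to the same IOMMU instance.
 */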
static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	struct qcom_iommu_dev *qcom_iommu;
	struct platform_device *iommu_pdev;
	unsigned asid = args->args[0];

	if (args->args_count != 1) {
		dev_err(dev, "incorrect number of iommu params found for %s "
			"(found %d, expected 1)\n",
			args->np->full_name, args->args_count);
		return -EINVAL;
	}

	iommu_pdev = of_find_device_by_node(args->np);
	if (WARN_ON(!iommu_pdev))
		return -EINVAL;

	qcom_iommu = platform_get_drvdata(iommu_pdev);

	/* make sure the asid specified in dt is valid, so we don't have
	 * to sanity check this elsewhere, since 'asid - 1' is used to
	 * index into qcom_iommu->ctxs:
	 */
	if (WARN_ON(asid < 1) ||
	    WARN_ON(asid > qcom_iommu->num_ctxs))
		return -EINVAL;

	if (!dev_iommu_priv_get(dev)) {
		dev_iommu_priv_set(dev, qcom_iommu);
	} else {
		/* make sure devices iommus dt node isn't referring to
		 * multiple different iommu devices. Multiple context
		 * banks are ok, but multiple devices are not:
		 */
		if (WARN_ON(qcom_iommu != dev_iommu_priv_get(dev)))
			return -EINVAL;
	}

	return iommu_fwspec_add_ids(dev, &asid, 1);
}

static const struct iommu_ops qcom_iommu_ops = {
	.capable	= qcom_iommu_capable,
	.domain_alloc	= qcom_iommu_domain_alloc,
	.domain_free	= qcom_iommu_domain_free,
	.attach_dev	= qcom_iommu_attach_dev,
	.detach_dev	= qcom_iommu_detach_dev,
	.map		= qcom_iommu_map,
	.unmap		= qcom_iommu_unmap,
	.flush_iotlb_all = qcom_iommu_flush_iotlb_all,
	.iotlb_sync	= qcom_iommu_iotlb_sync,
	.iova_to_phys	= qcom_iommu_iova_to_phys,
	.add_device	= qcom_iommu_add_device,
	.remove_device	= qcom_iommu_remove_device,
	.device_group	= generic_device_group,
	.of_xlate	= qcom_iommu_of_xlate,
	.pgsize_bitmap	= SZ_4K | SZ_64K | SZ_1M | SZ_16M,
};

static int qcom_iommu_enable_clocks(struct qcom_iommu_dev *qcom_iommu)
{
	int ret;

	ret = clk_prepare_enable(qcom_iommu->iface_clk);
	if (ret) {
		dev_err(qcom_iommu->dev, "Couldn't enable iface_clk\n");
		return ret;
	}

	ret = clk_prepare_enable(qcom_iommu->bus_clk);
	if (ret) {
		dev_err(qcom_iommu->dev, "Couldn't enable bus_clk\n");
		clk_disable_unprepare(qcom_iommu->iface_clk);
		return ret;
	}

	return 0;
}

static void qcom_iommu_disable_clocks(struct qcom_iommu_dev *qcom_iommu)
{
	clk_disable_unprepare(qcom_iommu->bus_clk);
	clk_disable_unprepare(qcom_iommu->iface_clk);
}

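/*
 * One-time setup of the secure pagetable memory: query the required size
 * from the secure world, allocate it (without a kernel mapping), and hand
 * it over via SCM.
 */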
static int qcom_iommu_sec_ptbl_init(struct device *dev)
{
	size_t psize = 0;
	unsigned int spare = 0;
	void *cpu_addr;
	dma_addr_t paddr;
	unsigned long attrs;
	static bool allocated = false;
	int ret;

	if (allocated)
		return 0;

	ret = qcom_scm_iommu_secure_ptbl_size(spare, &psize);
	if (ret) {
		dev_err(dev, "failed to get iommu secure pgtable size (%d)\n",
			ret);
		return ret;
	}

	dev_info(dev, "iommu sec: pgtable size: %zu\n", psize);

	attrs = DMA_ATTR_NO_KERNEL_MAPPING;

	cpu_addr = dma_alloc_attrs(dev, psize, &paddr, GFP_KERNEL, attrs);
	if (!cpu_addr) {
		dev_err(dev, "failed to allocate %zu bytes for pgtable\n",
			psize);
		return -ENOMEM;
	}

	ret = qcom_scm_iommu_secure_ptbl_init(paddr, psize, spare);
	if (ret) {
		dev_err(dev, "failed to init iommu pgtable (%d)\n", ret);
		goto free_mem;
	}

	allocated = true;
	return 0;

free_mem:
	dma_free_attrs(dev, psize, cpu_addr, paddr, attrs);
	return ret;
}

static int get_asid(const struct device_node *np)
{
	u32 reg;

	/* read the "reg" property directly to get the relative address
	 * of the context bank, and calculate the asid from that:
	 */
	if (of_property_read_u32_index(np, "reg", 0, &reg))
		return -ENODEV;

	return reg / 0x1000;	/* context banks are 0x1000 apart */
}

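/*
 * Probe one context bank child device: map its registers, install the
 * fault IRQ handler, and register the context with the parent IOMMU under
 * its asid.
 */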
static int qcom_iommu_ctx_probe(struct platform_device *pdev)
{
	struct qcom_iommu_ctx *ctx;
	struct device *dev = &pdev->dev;
	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev->parent);
	struct resource *res;
	int ret, irq;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->dev = dev;
	platform_set_drvdata(pdev, ctx);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ctx->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(ctx->base))
		return PTR_ERR(ctx->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENODEV;

	/* clear IRQs before registering fault handler, just in case the
	 * boot-loader left us a surprise:
	 */
	iommu_writel(ctx, ARM_SMMU_CB_FSR, iommu_readl(ctx, ARM_SMMU_CB_FSR));

	ret = devm_request_irq(dev, irq,
			       qcom_iommu_fault,
			       IRQF_SHARED,
			       "qcom-iommu-fault",
			       ctx);
	if (ret) {
		dev_err(dev, "failed to request IRQ %u\n", irq);
		return ret;
	}

	ret = get_asid(dev->of_node);
	if (ret < 0) {
		dev_err(dev, "missing reg property\n");
		return ret;
	}

	ctx->asid = ret;

	dev_dbg(dev, "found asid %u\n", ctx->asid);

	qcom_iommu->ctxs[ctx->asid - 1] = ctx;

	return 0;
}

static int qcom_iommu_ctx_remove(struct platform_device *pdev)
{
	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(pdev->dev.parent);
	struct qcom_iommu_ctx *ctx = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	qcom_iommu->ctxs[ctx->asid - 1] = NULL;

	return 0;
}

static const struct of_device_id ctx_of_match[] = {
	{ .compatible = "qcom,msm-iommu-v1-ns" },
	{ .compatible = "qcom,msm-iommu-v1-sec" },
	{ /* sentinel */ }
};

static struct platform_driver qcom_iommu_ctx_driver = {
	.driver	= {
		.name		= "qcom-iommu-ctx",
		.of_match_table	= of_match_ptr(ctx_of_match),
	},
	.probe	= qcom_iommu_ctx_probe,
	.remove	= qcom_iommu_ctx_remove,
};

static bool qcom_iommu_has_secure_context(struct qcom_iommu_dev *qcom_iommu)
{
	struct device_node *child;

	for_each_child_of_node(qcom_iommu->dev->of_node, child)
		if (of_device_is_compatible(child, "qcom,msm-iommu-v1-sec"))
			return true;

	return false;
}

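/*
 * Probe the top-level IOMMU device: size the context array from the
 * children's asids, grab clocks and the secure-id, initialize the secure
 * pagetable if any secure context exists, populate the context bank child
 * devices, and register with the IOMMU core.
 */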
static int qcom_iommu_device_probe(struct platform_device *pdev)
{
	struct device_node *child;
	struct qcom_iommu_dev *qcom_iommu;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int ret, max_asid = 0;

	/* find the max asid (which is 1:1 to ctx bank idx), so we know how
	 * many child ctx devices we have:
	 */
	for_each_child_of_node(dev->of_node, child)
		max_asid = max(max_asid, get_asid(child));

	qcom_iommu = devm_kzalloc(dev, struct_size(qcom_iommu, ctxs, max_asid),
				  GFP_KERNEL);
	if (!qcom_iommu)
		return -ENOMEM;
	qcom_iommu->num_ctxs = max_asid;
	qcom_iommu->dev = dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res) {
		qcom_iommu->local_base = devm_ioremap_resource(dev, res);
		if (IS_ERR(qcom_iommu->local_base))
			return PTR_ERR(qcom_iommu->local_base);
	}

	qcom_iommu->iface_clk = devm_clk_get(dev, "iface");
	if (IS_ERR(qcom_iommu->iface_clk)) {
		dev_err(dev, "failed to get iface clock\n");
		return PTR_ERR(qcom_iommu->iface_clk);
	}

	qcom_iommu->bus_clk = devm_clk_get(dev, "bus");
	if (IS_ERR(qcom_iommu->bus_clk)) {
		dev_err(dev, "failed to get bus clock\n");
		return PTR_ERR(qcom_iommu->bus_clk);
	}

	if (of_property_read_u32(dev->of_node, "qcom,iommu-secure-id",
				 &qcom_iommu->sec_id)) {
		dev_err(dev, "missing qcom,iommu-secure-id property\n");
		return -ENODEV;
	}

	if (qcom_iommu_has_secure_context(qcom_iommu)) {
		ret = qcom_iommu_sec_ptbl_init(dev);
		if (ret) {
			dev_err(dev, "cannot init secure pg table(%d)\n", ret);
			return ret;
		}
	}

	platform_set_drvdata(pdev, qcom_iommu);

	pm_runtime_enable(dev);

	/* register context bank devices, which are child nodes: */
	ret = devm_of_platform_populate(dev);
	if (ret) {
		dev_err(dev, "Failed to populate iommu contexts\n");
		return ret;
	}

	ret = iommu_device_sysfs_add(&qcom_iommu->iommu, dev, NULL,
				     dev_name(dev));
	if (ret) {
		dev_err(dev, "Failed to register iommu in sysfs\n");
		return ret;
	}

	iommu_device_set_ops(&qcom_iommu->iommu, &qcom_iommu_ops);
	iommu_device_set_fwnode(&qcom_iommu->iommu, dev->fwnode);

	ret = iommu_device_register(&qcom_iommu->iommu);
	if (ret) {
		dev_err(dev, "Failed to register iommu\n");
		return ret;
	}

	bus_set_iommu(&platform_bus_type, &qcom_iommu_ops);

	if (qcom_iommu->local_base) {
		pm_runtime_get_sync(dev);
		writel_relaxed(0xffffffff, qcom_iommu->local_base + SMMU_INTR_SEL_NS);
		pm_runtime_put_sync(dev);
	}

	return 0;
}

static int qcom_iommu_device_remove(struct platform_device *pdev)
{
	struct qcom_iommu_dev *qcom_iommu = platform_get_drvdata(pdev);

	bus_set_iommu(&platform_bus_type, NULL);

	pm_runtime_force_suspend(&pdev->dev);
	platform_set_drvdata(pdev, NULL);
	iommu_device_sysfs_remove(&qcom_iommu->iommu);
	iommu_device_unregister(&qcom_iommu->iommu);

	return 0;
}

static int __maybe_unused qcom_iommu_resume(struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);

	return qcom_iommu_enable_clocks(qcom_iommu);
}

static int __maybe_unused qcom_iommu_suspend(struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);

	qcom_iommu_disable_clocks(qcom_iommu);

	return 0;
}

static const struct dev_pm_ops qcom_iommu_pm_ops = {
	SET_RUNTIME_PM_OPS(qcom_iommu_suspend, qcom_iommu_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

static const struct of_device_id qcom_iommu_of_match[] = {
	{ .compatible = "qcom,msm-iommu-v1" },
	{ /* sentinel */ }
};

static struct platform_driver qcom_iommu_driver = {
	.driver	= {
		.name		= "qcom-iommu",
		.of_match_table	= of_match_ptr(qcom_iommu_of_match),
		.pm		= &qcom_iommu_pm_ops,
	},
	.probe	= qcom_iommu_device_probe,
	.remove	= qcom_iommu_device_remove,
};

static int __init qcom_iommu_init(void)
{
	int ret;

	ret = platform_driver_register(&qcom_iommu_ctx_driver);
	if (ret)
		return ret;

	ret = platform_driver_register(&qcom_iommu_driver);
	if (ret)
		platform_driver_unregister(&qcom_iommu_ctx_driver);

	return ret;
}
device_initcall(qcom_iommu_init);