/*
 * Copyright (C) 2011-2014 NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <soc/tegra/ahb.h>
#include <soc/tegra/mc.h>

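/*
 * Per-instance driver context, created by tegra_smmu_probe() for the SMMU
 * embedded in the Tegra memory controller. The bitmap at @asids tracks
 * which hardware address space IDs are in use, protected by @lock.
 */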
struct tegra_smmu {
        void __iomem *regs;
        struct device *dev;

        struct tegra_mc *mc;
        const struct tegra_smmu_soc *soc;

        unsigned long pfn_mask;
        unsigned long tlb_mask;

        unsigned long *asids;
        struct mutex lock;

        struct list_head list;

        struct dentry *debugfs;

        struct iommu_device iommu;      /* IOMMU Core code handle */
};

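/*
 * One address space (IOMMU domain): a two-level page table rooted at the
 * page directory page @pd. @pts holds the struct page of each allocated
 * page table and @count the number of live PTEs in it, so that empty
 * tables can be freed. @id is the hardware ASID assigned while attached.
 */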
struct tegra_smmu_as {
        struct iommu_domain domain;
        struct tegra_smmu *smmu;
        unsigned int use_count;
        u32 *count;
        struct page **pts;
        struct page *pd;
        dma_addr_t pd_dma;
        unsigned id;
        u32 attr;
};

static struct tegra_smmu_as *to_smmu_as(struct iommu_domain *dom)
{
        return container_of(dom, struct tegra_smmu_as, domain);
}

static inline void smmu_writel(struct tegra_smmu *smmu, u32 value,
                               unsigned long offset)
{
        writel(value, smmu->regs + offset);
}

static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
{
        return readl(smmu->regs + offset);
}

#define SMMU_CONFIG 0x010
#define SMMU_CONFIG_ENABLE (1 << 0)

#define SMMU_TLB_CONFIG 0x14
#define SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
#define SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
#define SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \
        ((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)

#define SMMU_PTC_CONFIG 0x18
#define SMMU_PTC_CONFIG_ENABLE (1 << 29)
#define SMMU_PTC_CONFIG_REQ_LIMIT(x) (((x) & 0x0f) << 24)
#define SMMU_PTC_CONFIG_INDEX_MAP(x) ((x) & 0x3f)

#define SMMU_PTB_ASID 0x01c
#define SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f)

#define SMMU_PTB_DATA 0x020
#define SMMU_PTB_DATA_VALUE(dma, attr) ((dma) >> 12 | (attr))

#define SMMU_MK_PDE(dma, attr) ((dma) >> SMMU_PTE_SHIFT | (attr))

#define SMMU_TLB_FLUSH 0x030
#define SMMU_TLB_FLUSH_VA_MATCH_ALL (0 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_GROUP (3 << 0)
#define SMMU_TLB_FLUSH_ASID(x) (((x) & 0x7f) << 24)
#define SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
                                          SMMU_TLB_FLUSH_VA_MATCH_SECTION)
#define SMMU_TLB_FLUSH_VA_GROUP(addr) ((((addr) & 0xffffc000) >> 12) | \
                                        SMMU_TLB_FLUSH_VA_MATCH_GROUP)
#define SMMU_TLB_FLUSH_ASID_MATCH (1 << 31)

#define SMMU_PTC_FLUSH 0x034
#define SMMU_PTC_FLUSH_TYPE_ALL (0 << 0)
#define SMMU_PTC_FLUSH_TYPE_ADR (1 << 0)

#define SMMU_PTC_FLUSH_HI 0x9b8
#define SMMU_PTC_FLUSH_HI_MASK 0x3

/* per-SWGROUP SMMU_*_ASID register */
#define SMMU_ASID_ENABLE (1 << 31)
#define SMMU_ASID_MASK 0x7f
#define SMMU_ASID_VALUE(x) ((x) & SMMU_ASID_MASK)

/* page table definitions */
#define SMMU_NUM_PDE 1024
#define SMMU_NUM_PTE 1024

#define SMMU_SIZE_PD (SMMU_NUM_PDE * 4)
#define SMMU_SIZE_PT (SMMU_NUM_PTE * 4)

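/*
 * Address bits [31:22] index the page directory and bits [21:12] index a
 * page table: 1024 PDEs each covering a 4 MiB section, 1024 PTEs each
 * covering a 4 KiB page, spanning a 32-bit input address space.
 */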
#define SMMU_PDE_SHIFT 22
#define SMMU_PTE_SHIFT 12

#define SMMU_PD_READABLE (1 << 31)
#define SMMU_PD_WRITABLE (1 << 30)
#define SMMU_PD_NONSECURE (1 << 29)

#define SMMU_PDE_READABLE (1 << 31)
#define SMMU_PDE_WRITABLE (1 << 30)
#define SMMU_PDE_NONSECURE (1 << 29)
#define SMMU_PDE_NEXT (1 << 28)

#define SMMU_PTE_READABLE (1 << 31)
#define SMMU_PTE_WRITABLE (1 << 30)
#define SMMU_PTE_NONSECURE (1 << 29)

#define SMMU_PDE_ATTR (SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
                       SMMU_PDE_NONSECURE)
#define SMMU_PTE_ATTR (SMMU_PTE_READABLE | SMMU_PTE_WRITABLE | \
                       SMMU_PTE_NONSECURE)

static unsigned int iova_pd_index(unsigned long iova)
{
        return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);
}

static unsigned int iova_pt_index(unsigned long iova)
{
        return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1);
}
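
/*
 * For example, iova 0x12345678 decomposes into page directory index
 * 0x048 (0x12345678 >> 22) and page table index 0x345
 * ((0x12345678 >> 12) & 0x3ff).
 */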

static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
{
        addr >>= 12;
        return (addr & smmu->pfn_mask) == addr;
}

static dma_addr_t smmu_pde_to_dma(u32 pde)
{
        return pde << 12;
}

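/*
 * The PTC is the SMMU's cache of page directory/table entries fetched
 * from memory. It must be invalidated, either wholesale (below) or by
 * the cache line containing a modified entry, before the hardware can
 * observe updated PDEs/PTEs.
 */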
static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
{
        smmu_writel(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_ptc(struct tegra_smmu *smmu, dma_addr_t dma,
                                  unsigned long offset)
{
        u32 value;

        offset &= ~(smmu->mc->soc->atom_size - 1);

        if (smmu->mc->soc->num_address_bits > 32) {
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
                value = (dma >> 32) & SMMU_PTC_FLUSH_HI_MASK;
#else
                value = 0;
#endif
                smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
        }

        value = (dma + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
        smmu_writel(smmu, value, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_tlb(struct tegra_smmu *smmu)
{
        smmu_writel(smmu, SMMU_TLB_FLUSH_VA_MATCH_ALL, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
                                       unsigned long asid)
{
        u32 value;

        value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
                SMMU_TLB_FLUSH_VA_MATCH_ALL;
        smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
                                          unsigned long asid,
                                          unsigned long iova)
{
        u32 value;

        value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
                SMMU_TLB_FLUSH_VA_SECTION(iova);
        smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
                                        unsigned long asid,
                                        unsigned long iova)
{
        u32 value;

        value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
                SMMU_TLB_FLUSH_VA_GROUP(iova);
        smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

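/*
 * Reading back SMMU_CONFIG ensures that all register writes posted so
 * far have actually reached the hardware.
 */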
static inline void smmu_flush(struct tegra_smmu *smmu)
{
        smmu_readl(smmu, SMMU_CONFIG);
}

static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
{
        unsigned long id;

        mutex_lock(&smmu->lock);

        id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
        if (id >= smmu->soc->num_asids) {
                mutex_unlock(&smmu->lock);
                return -ENOSPC;
        }

        set_bit(id, smmu->asids);
        *idp = id;

        mutex_unlock(&smmu->lock);
        return 0;
}

static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
{
        mutex_lock(&smmu->lock);
        clear_bit(id, smmu->asids);
        mutex_unlock(&smmu->lock);
}

static bool tegra_smmu_capable(enum iommu_cap cap)
{
        return false;
}

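/*
 * Allocate an unmanaged domain: a zeroed page directory page plus the
 * bookkeeping arrays for (not yet allocated) page tables. A hardware
 * ASID is only assigned once the domain is first attached to a device.
 */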
static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
{
        struct tegra_smmu_as *as;

        if (type != IOMMU_DOMAIN_UNMANAGED)
                return NULL;

        as = kzalloc(sizeof(*as), GFP_KERNEL);
        if (!as)
                return NULL;

        as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;

        as->pd = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
        if (!as->pd) {
                kfree(as);
                return NULL;
        }

        as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
        if (!as->count) {
                __free_page(as->pd);
                kfree(as);
                return NULL;
        }

        as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
        if (!as->pts) {
                kfree(as->count);
                __free_page(as->pd);
                kfree(as);
                return NULL;
        }

        /* setup aperture */
        as->domain.geometry.aperture_start = 0;
        as->domain.geometry.aperture_end = 0xffffffff;
        as->domain.geometry.force_aperture = true;

        return &as->domain;
}

static void tegra_smmu_domain_free(struct iommu_domain *domain)
{
        struct tegra_smmu_as *as = to_smmu_as(domain);

        /* TODO: free page directory and page tables */

        kfree(as);
}

static const struct tegra_smmu_swgroup *
tegra_smmu_find_swgroup(struct tegra_smmu *smmu, unsigned int swgroup)
{
        const struct tegra_smmu_swgroup *group = NULL;
        unsigned int i;

        for (i = 0; i < smmu->soc->num_swgroups; i++) {
                if (smmu->soc->swgroups[i].swgroup == swgroup) {
                        group = &smmu->soc->swgroups[i];
                        break;
                }
        }

        return group;
}

static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
                              unsigned int asid)
{
        const struct tegra_smmu_swgroup *group;
        unsigned int i;
        u32 value;

        for (i = 0; i < smmu->soc->num_clients; i++) {
                const struct tegra_mc_client *client = &smmu->soc->clients[i];

                if (client->swgroup != swgroup)
                        continue;

                value = smmu_readl(smmu, client->smmu.reg);
                value |= BIT(client->smmu.bit);
                smmu_writel(smmu, value, client->smmu.reg);
        }

        group = tegra_smmu_find_swgroup(smmu, swgroup);
        if (group) {
                value = smmu_readl(smmu, group->reg);
                value &= ~SMMU_ASID_MASK;
                value |= SMMU_ASID_VALUE(asid);
                value |= SMMU_ASID_ENABLE;
                smmu_writel(smmu, value, group->reg);
        }
}

static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
                               unsigned int asid)
{
        const struct tegra_smmu_swgroup *group;
        unsigned int i;
        u32 value;

        group = tegra_smmu_find_swgroup(smmu, swgroup);
        if (group) {
                value = smmu_readl(smmu, group->reg);
                value &= ~SMMU_ASID_MASK;
                value |= SMMU_ASID_VALUE(asid);
                value &= ~SMMU_ASID_ENABLE;
                smmu_writel(smmu, value, group->reg);
        }

        for (i = 0; i < smmu->soc->num_clients; i++) {
                const struct tegra_mc_client *client = &smmu->soc->clients[i];

                if (client->swgroup != swgroup)
                        continue;

                value = smmu_readl(smmu, client->smmu.reg);
                value &= ~BIT(client->smmu.bit);
                smmu_writel(smmu, value, client->smmu.reg);
        }
}

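/*
 * Bind an address space to a hardware ASID on first use: map the page
 * directory for DMA, then program SMMU_PTB_ASID/SMMU_PTB_DATA so that
 * translations for this ASID walk the domain's page tables. Subsequent
 * attachments only increment the use count.
 */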
static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
                                 struct tegra_smmu_as *as)
{
        u32 value;
        int err;

        if (as->use_count > 0) {
                as->use_count++;
                return 0;
        }

        as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
                                  DMA_TO_DEVICE);
        if (dma_mapping_error(smmu->dev, as->pd_dma))
                return -ENOMEM;

        /* We can't handle 64-bit DMA addresses */
        if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
                err = -ENOMEM;
                goto err_unmap;
        }

        err = tegra_smmu_alloc_asid(smmu, &as->id);
        if (err < 0)
                goto err_unmap;

        smmu_flush_ptc(smmu, as->pd_dma, 0);
        smmu_flush_tlb_asid(smmu, as->id);

        smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
        value = SMMU_PTB_DATA_VALUE(as->pd_dma, as->attr);
        smmu_writel(smmu, value, SMMU_PTB_DATA);
        smmu_flush(smmu);

        as->smmu = smmu;
        as->use_count++;

        return 0;

err_unmap:
        dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
        return err;
}

static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
                                    struct tegra_smmu_as *as)
{
        if (--as->use_count > 0)
                return;

        tegra_smmu_free_asid(smmu, as->id);

        dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);

        as->smmu = NULL;
}

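/*
 * A client device references the SMMU through the standard "iommus"
 * property, with the memory controller as the IOMMU and its SWGROUP ID
 * as the single specifier cell, along these lines (a sketch only; node
 * and SWGROUP names depend on the SoC and board):
 *
 *      host1x@50000000 {
 *              iommus = <&mc TEGRA_SWGROUP_HC>;
 *      };
 */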
static int tegra_smmu_attach_dev(struct iommu_domain *domain,
                                 struct device *dev)
{
        struct tegra_smmu *smmu = dev->archdata.iommu;
        struct tegra_smmu_as *as = to_smmu_as(domain);
        struct device_node *np = dev->of_node;
        struct of_phandle_args args;
        unsigned int index = 0;
        int err = 0;

        while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
                                           &args)) {
                unsigned int swgroup = args.args[0];

                if (args.np != smmu->dev->of_node) {
                        of_node_put(args.np);
                        index++;
                        continue;
                }

                of_node_put(args.np);

                err = tegra_smmu_as_prepare(smmu, as);
                if (err < 0)
                        return err;

                tegra_smmu_enable(smmu, swgroup, as->id);
                index++;
        }

        if (index == 0)
                return -ENODEV;

        return 0;
}

static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
        struct tegra_smmu_as *as = to_smmu_as(domain);
        struct device_node *np = dev->of_node;
        struct tegra_smmu *smmu = as->smmu;
        struct of_phandle_args args;
        unsigned int index = 0;

        while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
                                           &args)) {
                unsigned int swgroup = args.args[0];

                if (args.np != smmu->dev->of_node) {
                        of_node_put(args.np);
                        index++;
                        continue;
                }

                of_node_put(args.np);

                tegra_smmu_disable(smmu, swgroup, as->id);
                tegra_smmu_as_unprepare(smmu, as);
                index++;
        }
}

static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
                               u32 value)
{
        unsigned int pd_index = iova_pd_index(iova);
        struct tegra_smmu *smmu = as->smmu;
        u32 *pd = page_address(as->pd);
        unsigned long offset = pd_index * sizeof(*pd);

        /* Set the page directory entry first */
        pd[pd_index] = value;

        /* Then flush the page directory entry from caches */
        dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
                                         sizeof(*pd), DMA_TO_DEVICE);

        /* And flush the iommu */
        smmu_flush_ptc(smmu, as->pd_dma, offset);
        smmu_flush_tlb_section(smmu, as->id, iova);
        smmu_flush(smmu);
}

static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova)
{
        u32 *pt = page_address(pt_page);

        return pt + iova_pt_index(iova);
}

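/*
 * Look up the PTE for @iova, or return NULL if no page table has been
 * installed for its section yet. On success, *dmap receives the DMA
 * address of the page table so callers can sync the entry they modify.
 */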
static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
                                  dma_addr_t *dmap)
{
        unsigned int pd_index = iova_pd_index(iova);
        struct page *pt_page;
        u32 *pd;

        pt_page = as->pts[pd_index];
        if (!pt_page)
                return NULL;

        pd = page_address(as->pd);
        *dmap = smmu_pde_to_dma(pd[pd_index]);

        return tegra_smmu_pte_offset(pt_page, iova);
}

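/*
 * Like tegra_smmu_pte_lookup(), but allocate, DMA-map and hook up a new
 * page table (via a PDE with SMMU_PDE_NEXT) if the section has none.
 */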
static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
                       dma_addr_t *dmap)
{
        unsigned int pde = iova_pd_index(iova);
        struct tegra_smmu *smmu = as->smmu;

        if (!as->pts[pde]) {
                struct page *page;
                dma_addr_t dma;

                page = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
                if (!page)
                        return NULL;

                dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
                                   DMA_TO_DEVICE);
                if (dma_mapping_error(smmu->dev, dma)) {
                        __free_page(page);
                        return NULL;
                }

                if (!smmu_dma_addr_valid(smmu, dma)) {
                        dma_unmap_page(smmu->dev, dma, SMMU_SIZE_PT,
                                       DMA_TO_DEVICE);
                        __free_page(page);
                        return NULL;
                }

                as->pts[pde] = page;

                tegra_smmu_set_pde(as, iova, SMMU_MK_PDE(dma, SMMU_PDE_ATTR |
                                                         SMMU_PDE_NEXT));

                *dmap = dma;
        } else {
                u32 *pd = page_address(as->pd);

                *dmap = smmu_pde_to_dma(pd[pde]);
        }

        return tegra_smmu_pte_offset(as->pts[pde], iova);
}

static void tegra_smmu_pte_get_use(struct tegra_smmu_as *as, unsigned long iova)
{
        unsigned int pd_index = iova_pd_index(iova);

        as->count[pd_index]++;
}

static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
{
        unsigned int pde = iova_pd_index(iova);
        struct page *page = as->pts[pde];

        /*
         * When no entries in this page table are used anymore, return the
         * memory page to the system.
         */
        if (--as->count[pde] == 0) {
                struct tegra_smmu *smmu = as->smmu;
                u32 *pd = page_address(as->pd);
                dma_addr_t pte_dma = smmu_pde_to_dma(pd[pde]);

                tegra_smmu_set_pde(as, iova, 0);

                dma_unmap_page(smmu->dev, pte_dma, SMMU_SIZE_PT, DMA_TO_DEVICE);
                __free_page(page);
                as->pts[pde] = NULL;
        }
}

static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
                               u32 *pte, dma_addr_t pte_dma, u32 val)
{
        struct tegra_smmu *smmu = as->smmu;
        unsigned long offset = offset_in_page(pte);

        *pte = val;

        dma_sync_single_range_for_device(smmu->dev, pte_dma, offset,
                                         4, DMA_TO_DEVICE);
        smmu_flush_ptc(smmu, pte_dma, offset);
        smmu_flush_tlb_group(smmu, as->id, iova);
        smmu_flush(smmu);
}

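/*
 * With pgsize_bitmap set to SZ_4K the IOMMU core calls ->map()/->unmap()
 * one 4 KiB page at a time. Note that @prot is not honoured here: every
 * mapping is installed readable, writable and non-secure.
 */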
static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
                          phys_addr_t paddr, size_t size, int prot)
{
        struct tegra_smmu_as *as = to_smmu_as(domain);
        dma_addr_t pte_dma;
        u32 *pte;

        pte = as_get_pte(as, iova, &pte_dma);
        if (!pte)
                return -ENOMEM;

        /* If we aren't overwriting a pre-existing entry, increment use */
        if (*pte == 0)
                tegra_smmu_pte_get_use(as, iova);

        tegra_smmu_set_pte(as, iova, pte, pte_dma,
                           __phys_to_pfn(paddr) | SMMU_PTE_ATTR);

        return 0;
}

static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
                               size_t size)
{
        struct tegra_smmu_as *as = to_smmu_as(domain);
        dma_addr_t pte_dma;
        u32 *pte;

        pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
        if (!pte || !*pte)
                return 0;

        tegra_smmu_set_pte(as, iova, pte, pte_dma, 0);
        tegra_smmu_pte_put_use(as, iova);

        return size;
}

static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
                                           dma_addr_t iova)
{
        struct tegra_smmu_as *as = to_smmu_as(domain);
        unsigned long pfn;
        dma_addr_t pte_dma;
        u32 *pte;

        pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
        if (!pte || !*pte)
                return 0;

        pfn = *pte & as->smmu->pfn_mask;

        return PFN_PHYS(pfn);
}

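/*
 * Resolve a device-tree node to the SMMU instance owned by the memory
 * controller it describes, provided that device has been probed.
 */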
static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
{
        struct platform_device *pdev;
        struct tegra_mc *mc;

        pdev = of_find_device_by_node(np);
        if (!pdev)
                return NULL;

        mc = platform_get_drvdata(pdev);
        if (!mc)
                return NULL;

        return mc->smmu;
}

static int tegra_smmu_add_device(struct device *dev)
{
        struct device_node *np = dev->of_node;
        struct iommu_group *group;
        struct of_phandle_args args;
        unsigned int index = 0;

        while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
                                          &args) == 0) {
                struct tegra_smmu *smmu;

                smmu = tegra_smmu_find(args.np);
                if (smmu) {
                        /*
                         * Only a single IOMMU master interface is currently
                         * supported by the Linux kernel, so abort after the
                         * first match.
                         */
                        dev->archdata.iommu = smmu;

                        iommu_device_link(&smmu->iommu, dev);

                        break;
                }

                index++;
        }

        group = iommu_group_get_for_dev(dev);
        if (IS_ERR(group))
                return PTR_ERR(group);

        iommu_group_put(group);

        return 0;
}

static void tegra_smmu_remove_device(struct device *dev)
{
        struct tegra_smmu *smmu = dev->archdata.iommu;

        if (smmu)
                iommu_device_unlink(&smmu->iommu, dev);

        dev->archdata.iommu = NULL;
        iommu_group_remove_device(dev);
}

static const struct iommu_ops tegra_smmu_ops = {
        .capable = tegra_smmu_capable,
        .domain_alloc = tegra_smmu_domain_alloc,
        .domain_free = tegra_smmu_domain_free,
        .attach_dev = tegra_smmu_attach_dev,
        .detach_dev = tegra_smmu_detach_dev,
        .add_device = tegra_smmu_add_device,
        .remove_device = tegra_smmu_remove_device,
        .device_group = generic_device_group,
        .map = tegra_smmu_map,
        .unmap = tegra_smmu_unmap,
        .map_sg = default_iommu_map_sg,
        .iova_to_phys = tegra_smmu_iova_to_phys,

        .pgsize_bitmap = SZ_4K,
};

static void tegra_smmu_ahb_enable(void)
{
        static const struct of_device_id ahb_match[] = {
                { .compatible = "nvidia,tegra30-ahb", },
                { }
        };
        struct device_node *ahb;

        ahb = of_find_matching_node(NULL, ahb_match);
        if (ahb) {
                tegra_ahb_enable_smmu(ahb);
                of_node_put(ahb);
        }
}

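/*
 * The debugfs files below report, for every SWGROUP and every memory
 * client, whether SMMU translation is enabled and which ASID it uses.
 */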
static int tegra_smmu_swgroups_show(struct seq_file *s, void *data)
{
        struct tegra_smmu *smmu = s->private;
        unsigned int i;
        u32 value;

        seq_printf(s, "swgroup    enabled  ASID\n");
        seq_printf(s, "------------------------\n");

        for (i = 0; i < smmu->soc->num_swgroups; i++) {
                const struct tegra_smmu_swgroup *group = &smmu->soc->swgroups[i];
                const char *status;
                unsigned int asid;

                value = smmu_readl(smmu, group->reg);

                if (value & SMMU_ASID_ENABLE)
                        status = "yes";
                else
                        status = "no";

                asid = value & SMMU_ASID_MASK;

                seq_printf(s, "%-9s %-7s %#04x\n", group->name, status,
                           asid);
        }

        return 0;
}

static int tegra_smmu_swgroups_open(struct inode *inode, struct file *file)
{
        return single_open(file, tegra_smmu_swgroups_show, inode->i_private);
}

static const struct file_operations tegra_smmu_swgroups_fops = {
        .open = tegra_smmu_swgroups_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int tegra_smmu_clients_show(struct seq_file *s, void *data)
{
        struct tegra_smmu *smmu = s->private;
        unsigned int i;
        u32 value;

        seq_printf(s, "client       enabled\n");
        seq_printf(s, "--------------------\n");

        for (i = 0; i < smmu->soc->num_clients; i++) {
                const struct tegra_mc_client *client = &smmu->soc->clients[i];
                const char *status;

                value = smmu_readl(smmu, client->smmu.reg);

                if (value & BIT(client->smmu.bit))
                        status = "yes";
                else
                        status = "no";

                seq_printf(s, "%-12s %s\n", client->name, status);
        }

        return 0;
}

static int tegra_smmu_clients_open(struct inode *inode, struct file *file)
{
        return single_open(file, tegra_smmu_clients_show, inode->i_private);
}

static const struct file_operations tegra_smmu_clients_fops = {
        .open = tegra_smmu_clients_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static void tegra_smmu_debugfs_init(struct tegra_smmu *smmu)
{
        smmu->debugfs = debugfs_create_dir("smmu", NULL);
        if (!smmu->debugfs)
                return;

        debugfs_create_file("swgroups", S_IRUGO, smmu->debugfs, smmu,
                            &tegra_smmu_swgroups_fops);
        debugfs_create_file("clients", S_IRUGO, smmu->debugfs, smmu,
                            &tegra_smmu_clients_fops);
}

static void tegra_smmu_debugfs_exit(struct tegra_smmu *smmu)
{
        debugfs_remove_recursive(smmu->debugfs);
}

struct tegra_smmu *tegra_smmu_probe(struct device *dev,
                                    const struct tegra_smmu_soc *soc,
                                    struct tegra_mc *mc)
{
        struct tegra_smmu *smmu;
        size_t size;
        u32 value;
        int err;

        /* This can happen on Tegra20 which doesn't have an SMMU */
        if (!soc)
                return NULL;

        smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
        if (!smmu)
                return ERR_PTR(-ENOMEM);

        /*
         * This is a bit of a hack. Ideally we'd want to simply return this
         * value. However the IOMMU registration process will attempt to add
         * all devices to the IOMMU when bus_set_iommu() is called. In order
         * not to rely on global variables to track the IOMMU instance, we
         * set it here so that it can be looked up from the .add_device()
         * callback via the IOMMU device's .drvdata field.
         */
        mc->smmu = smmu;

        size = BITS_TO_LONGS(soc->num_asids) * sizeof(long);

        smmu->asids = devm_kzalloc(dev, size, GFP_KERNEL);
        if (!smmu->asids)
                return ERR_PTR(-ENOMEM);

        mutex_init(&smmu->lock);

        smmu->regs = mc->regs;
        smmu->soc = soc;
        smmu->dev = dev;
        smmu->mc = mc;

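        /*
         * For instance, an SoC with 34 physical address bits and 4 KiB
         * pages yields pfn_mask = (1 << 22) - 1, i.e. PTEs hold a 22-bit
         * page frame number.
         */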
        smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;
        dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
                mc->soc->num_address_bits, smmu->pfn_mask);
        smmu->tlb_mask = (smmu->soc->num_tlb_lines << 1) - 1;
        dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
                smmu->tlb_mask);

        value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);

        if (soc->supports_request_limit)
                value |= SMMU_PTC_CONFIG_REQ_LIMIT(8);

        smmu_writel(smmu, value, SMMU_PTC_CONFIG);

        value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
                SMMU_TLB_CONFIG_ACTIVE_LINES(smmu);

        if (soc->supports_round_robin_arbitration)
                value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;

        smmu_writel(smmu, value, SMMU_TLB_CONFIG);

        smmu_flush_ptc_all(smmu);
        smmu_flush_tlb(smmu);
        smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
        smmu_flush(smmu);

        tegra_smmu_ahb_enable();

        err = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, dev_name(dev));
        if (err)
                return ERR_PTR(err);

        iommu_device_set_ops(&smmu->iommu, &tegra_smmu_ops);

        err = iommu_device_register(&smmu->iommu);
        if (err) {
                iommu_device_sysfs_remove(&smmu->iommu);
                return ERR_PTR(err);
        }

        err = bus_set_iommu(&platform_bus_type, &tegra_smmu_ops);
        if (err < 0) {
                iommu_device_unregister(&smmu->iommu);
                iommu_device_sysfs_remove(&smmu->iommu);
                return ERR_PTR(err);
        }

        if (IS_ENABLED(CONFIG_DEBUG_FS))
                tegra_smmu_debugfs_init(smmu);

        return smmu;
}

void tegra_smmu_remove(struct tegra_smmu *smmu)
{
        iommu_device_unregister(&smmu->iommu);
        iommu_device_sysfs_remove(&smmu->iommu);

        if (IS_ENABLED(CONFIG_DEBUG_FS))
                tegra_smmu_debugfs_exit(smmu);
}