/* linux/drivers/iommu/exynos_iommu.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/iommu.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/export.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>

typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;

/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

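/*
 * Page table layout (derived from the macros below): the first-level table
 * has 4096 entries, each covering a 1MiB section of the 32-bit I/O virtual
 * address space. A first-level entry either maps a 1MiB section directly
 * (low bits == 2) or links to a second-level table (low bits == 1) of 256
 * entries, each mapping a 4KiB small page (low bits == 2) or taking part in
 * a 64KiB large page (low bits == 1, repeated over 16 consecutive entries).
 * Entries with low bits 0 (or 3 in the first level) are fault entries.
 */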
#define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
			   ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK)
#define lv1ent_page_zero(sent) ((*(sent) & 3) == 1)
#define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
			  ((*(sent) & 3) == 1))
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)

static u32 sysmmu_page_offset(sysmmu_iova_t iova, u32 size)
{
	return iova & (size - 1);
}

#define section_phys(sent) (*(sent) & SECT_MASK)
#define section_offs(iova) sysmmu_page_offset((iova), SECT_SIZE)
#define lpage_phys(pent) (*(pent) & LPAGE_MASK)
#define lpage_offs(iova) sysmmu_page_offset((iova), LPAGE_SIZE)
#define spage_phys(pent) (*(pent) & SPAGE_MASK)
#define spage_offs(iova) sysmmu_page_offset((iova), SPAGE_SIZE)

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)

static u32 lv1ent_offset(sysmmu_iova_t iova)
{
	return iova >> SECT_ORDER;
}

static u32 lv2ent_offset(sysmmu_iova_t iova)
{
	return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
}

#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)

#define lv2table_base(sent) (*(sent) & 0xFFFFFC00)

#define mk_lv1ent_sect(pa) ((pa) | 2)
#define mk_lv1ent_page(pa) ((pa) | 1)
#define mk_lv2ent_lpage(pa) ((pa) | 1)
#define mk_lv2ent_spage(pa) ((pa) | 2)

#define CTRL_ENABLE	0x5
#define CTRL_BLOCK	0x7
#define CTRL_DISABLE	0x0

#define CFG_LRU		0x1
#define CFG_QOS(n)	((n & 0xF) << 7)
#define CFG_MASK	0x0150FFFF /* Selecting bit 0-15, 20, 22 and 24 */
#define CFG_ACGEN	(1 << 24) /* System MMU 3.3 only */
#define CFG_SYSSEL	(1 << 22) /* System MMU 3.2 only */
#define CFG_FLPDCACHE	(1 << 20) /* System MMU 3.2+ only */

#define REG_MMU_CTRL		0x000
#define REG_MMU_CFG		0x004
#define REG_MMU_STATUS		0x008
#define REG_MMU_FLUSH		0x00C
#define REG_MMU_FLUSH_ENTRY	0x010
#define REG_PT_BASE_ADDR	0x014
#define REG_INT_STATUS		0x018
#define REG_INT_CLEAR		0x01C

#define REG_PAGE_FAULT_ADDR	0x024
#define REG_AW_FAULT_ADDR	0x028
#define REG_AR_FAULT_ADDR	0x02C
#define REG_DEFAULT_SLAVE_ADDR	0x030

#define REG_MMU_VERSION		0x034

#define MMU_MAJ_VER(val)	((val) >> 7)
#define MMU_MIN_VER(val)	((val) & 0x7F)
#define MMU_RAW_VER(reg)	(((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */

#define MAKE_MMU_VER(maj, min)	((((maj) & 0xF) << 7) | ((min) & 0x7F))

#define REG_PB0_SADDR		0x04C
#define REG_PB0_EADDR		0x050
#define REG_PB1_SADDR		0x054
#define REG_PB1_EADDR		0x058

#define has_sysmmu(dev)		(dev->archdata.iommu != NULL)

static struct kmem_cache *lv2table_kmem_cache;
static sysmmu_pte_t *zero_lv2_table;
#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))

static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
	return pgtable + lv1ent_offset(iova);
}

static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
	return (sysmmu_pte_t *)phys_to_virt(
				lv2table_base(sent)) + lv2ent_offset(iova);
}

enum exynos_sysmmu_inttype {
	SYSMMU_PAGEFAULT,
	SYSMMU_AR_MULTIHIT,
	SYSMMU_AW_MULTIHIT,
	SYSMMU_BUSERROR,
	SYSMMU_AR_SECURITY,
	SYSMMU_AR_ACCESS,
	SYSMMU_AW_SECURITY,
	SYSMMU_AW_PROTECTION, /* 7 */
	SYSMMU_FAULT_UNKNOWN,
	SYSMMU_FAULTS_NUM
};

static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
	REG_PAGE_FAULT_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AW_FAULT_ADDR,
	REG_DEFAULT_SLAVE_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AW_FAULT_ADDR,
	REG_AW_FAULT_ADDR
};

static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
	"PAGE FAULT",
	"AR MULTI-HIT FAULT",
	"AW MULTI-HIT FAULT",
	"BUS ERROR",
	"AR SECURITY PROTECTION FAULT",
	"AR ACCESS PROTECTION FAULT",
	"AW SECURITY PROTECTION FAULT",
	"AW ACCESS PROTECTION FAULT",
	"UNKNOWN FAULT"
};

/* attached to dev.archdata.iommu of the master device */
struct exynos_iommu_owner {
	struct list_head client; /* entry of exynos_iommu_domain.clients */
	struct device *dev;
	struct device *sysmmu;
	struct iommu_domain *domain;
	void *vmm_data;		/* IO virtual memory manager's data */
	spinlock_t lock;	/* Lock to preserve consistency of System MMU */
};

struct exynos_iommu_domain {
	struct list_head clients; /* list of sysmmu_drvdata.node */
	sysmmu_pte_t *pgtable; /* lv1 page table, 16KB */
	short *lv2entcnt; /* free lv2 entry counter for each section */
	spinlock_t lock; /* lock for this structure */
	spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
	struct iommu_domain domain; /* generic domain data structure */
};

struct sysmmu_drvdata {
	struct device *sysmmu;	/* System MMU's device descriptor */
	struct device *master;	/* Owner of system MMU */
	void __iomem *sfrbase;
	struct clk *clk;
	struct clk *clk_master;
	int activations;
	spinlock_t lock;
	struct iommu_domain *domain;
	phys_addr_t pgtable;
	unsigned int version;
};

static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct exynos_iommu_domain, domain);
}

static bool set_sysmmu_active(struct sysmmu_drvdata *data)
{
	/* return true if the System MMU was not active previously
	   and it needs to be initialized */
	return ++data->activations == 1;
}

static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
{
	/* return true if the System MMU is needed to be disabled */
	BUG_ON(data->activations < 1);
	return --data->activations == 0;
}

static bool is_sysmmu_active(struct sysmmu_drvdata *data)
{
	return data->activations > 0;
}

static void sysmmu_unblock(void __iomem *sfrbase)
{
	__raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
}

249{
250 int i = 120;
251
252 __raw_writel(CTRL_BLOCK, sfrbase + REG_MMU_CTRL);
253 while ((i > 0) && !(__raw_readl(sfrbase + REG_MMU_STATUS) & 1))
254 --i;
255
256 if (!(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) {
257 sysmmu_unblock(sfrbase);
258 return false;
259 }
260
261 return true;
262}
263
264static void __sysmmu_tlb_invalidate(void __iomem *sfrbase)
265{
266 __raw_writel(0x1, sfrbase + REG_MMU_FLUSH);
267}
268
269static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase,
Cho KyongHod09d78f2014-05-12 11:44:58 +0530270 sysmmu_iova_t iova, unsigned int num_inv)
KyongHo Cho2a965362012-05-12 05:56:09 +0900271{
Cho KyongHo3ad6b7f2014-05-12 11:44:49 +0530272 unsigned int i;
Sachin Kamat365409d2014-05-22 09:50:56 +0530273
Cho KyongHo3ad6b7f2014-05-12 11:44:49 +0530274 for (i = 0; i < num_inv; i++) {
275 __raw_writel((iova & SPAGE_MASK) | 1,
276 sfrbase + REG_MMU_FLUSH_ENTRY);
277 iova += SPAGE_SIZE;
278 }
KyongHo Cho2a965362012-05-12 05:56:09 +0900279}
280
281static void __sysmmu_set_ptbase(void __iomem *sfrbase,
Cho KyongHod09d78f2014-05-12 11:44:58 +0530282 phys_addr_t pgd)
KyongHo Cho2a965362012-05-12 05:56:09 +0900283{
KyongHo Cho2a965362012-05-12 05:56:09 +0900284 __raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR);
285
286 __sysmmu_tlb_invalidate(sfrbase);
287}
288
Cho KyongHo1fab7fa2014-05-12 11:44:56 +0530289static void show_fault_information(const char *name,
290 enum exynos_sysmmu_inttype itype,
Cho KyongHod09d78f2014-05-12 11:44:58 +0530291 phys_addr_t pgtable_base, sysmmu_iova_t fault_addr)
KyongHo Cho2a965362012-05-12 05:56:09 +0900292{
Cho KyongHod09d78f2014-05-12 11:44:58 +0530293 sysmmu_pte_t *ent;
KyongHo Cho2a965362012-05-12 05:56:09 +0900294
295 if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
296 itype = SYSMMU_FAULT_UNKNOWN;
297
Cho KyongHod09d78f2014-05-12 11:44:58 +0530298 pr_err("%s occurred at %#x by %s(Page table base: %pa)\n",
Cho KyongHo1fab7fa2014-05-12 11:44:56 +0530299 sysmmu_fault_name[itype], fault_addr, name, &pgtable_base);
KyongHo Cho2a965362012-05-12 05:56:09 +0900300
Cho KyongHo7222e8d2014-05-12 11:44:46 +0530301 ent = section_entry(phys_to_virt(pgtable_base), fault_addr);
Cho KyongHod09d78f2014-05-12 11:44:58 +0530302 pr_err("\tLv1 entry: %#x\n", *ent);
KyongHo Cho2a965362012-05-12 05:56:09 +0900303
304 if (lv1ent_page(ent)) {
305 ent = page_entry(ent, fault_addr);
Cho KyongHod09d78f2014-05-12 11:44:58 +0530306 pr_err("\t Lv2 entry: %#x\n", *ent);
KyongHo Cho2a965362012-05-12 05:56:09 +0900307 }
KyongHo Cho2a965362012-05-12 05:56:09 +0900308}
309
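/*
 * Fault interrupt handler: the System MMU blocks itself when it raises a
 * fault interrupt. The handler decodes the fault type from REG_INT_STATUS,
 * reads the faulting address from the matching fault-address register and
 * reports it through report_iommu_fault(). Since no recovery is implemented
 * here, an unhandled fault ends in BUG(); otherwise the interrupt is cleared
 * and the System MMU is unblocked to retry the faulted translation.
 */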
static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
	/* SYSMMU is in blocked state when an interrupt has occurred. */
	struct sysmmu_drvdata *data = dev_id;
	enum exynos_sysmmu_inttype itype;
	sysmmu_iova_t addr = -1;
	int ret = -ENOSYS;

	WARN_ON(!is_sysmmu_active(data));

	spin_lock(&data->lock);

	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);

	itype = (enum exynos_sysmmu_inttype)
		__ffs(__raw_readl(data->sfrbase + REG_INT_STATUS));
	if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
		itype = SYSMMU_FAULT_UNKNOWN;
	else
		addr = __raw_readl(data->sfrbase + fault_reg_offset[itype]);

	if (itype == SYSMMU_FAULT_UNKNOWN) {
		pr_err("%s: Fault is not caused by System MMU '%s'!\n",
			__func__, dev_name(data->sysmmu));
		pr_err("%s: Please check if IRQ is correctly configured.\n",
			__func__);
		BUG();
	} else {
		unsigned int base =
				__raw_readl(data->sfrbase + REG_PT_BASE_ADDR);
		show_fault_information(dev_name(data->sysmmu),
					itype, base, addr);
		if (data->domain)
			ret = report_iommu_fault(data->domain,
					data->master, addr, itype);
	}

	/* fault is not recovered by fault handler */
	BUG_ON(ret != 0);

	__raw_writel(1 << itype, data->sfrbase + REG_INT_CLEAR);

	sysmmu_unblock(data->sfrbase);

	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);

	spin_unlock(&data->lock);

	return IRQ_HANDLED;
}

static void __sysmmu_disable_nocount(struct sysmmu_drvdata *data)
{
	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);

	__raw_writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
	__raw_writel(0, data->sfrbase + REG_MMU_CFG);

	clk_disable(data->clk);
	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);
}

static bool __sysmmu_disable(struct sysmmu_drvdata *data)
{
	bool disabled;
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);

	disabled = set_sysmmu_inactive(data);

	if (disabled) {
		data->pgtable = 0;
		data->domain = NULL;

		__sysmmu_disable_nocount(data);

		dev_dbg(data->sysmmu, "Disabled\n");
	} else {
		dev_dbg(data->sysmmu, "%d times left to disable\n",
					data->activations);
	}

	spin_unlock_irqrestore(&data->lock, flags);

	return disabled;
}

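/*
 * Program REG_MMU_CFG according to the hardware revision read from
 * REG_MMU_VERSION: LRU replacement and QoS level 15 are always requested,
 * the first-level page descriptor (FLPD) cache is enabled on v3.2 and later,
 * SYSSEL is set on v3.2, and on v3.3 ACGEN (presumably automatic clock
 * gating) is enabled while LRU is cleared. The probed version is kept in
 * the driver data for later version checks.
 */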
static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
	unsigned int cfg = CFG_LRU | CFG_QOS(15);
	unsigned int ver;

	ver = MMU_RAW_VER(__raw_readl(data->sfrbase + REG_MMU_VERSION));
	if (MMU_MAJ_VER(ver) == 3) {
		if (MMU_MIN_VER(ver) >= 2) {
			cfg |= CFG_FLPDCACHE;
			if (MMU_MIN_VER(ver) == 3) {
				cfg |= CFG_ACGEN;
				cfg &= ~CFG_LRU;
			} else {
				cfg |= CFG_SYSSEL;
			}
		}
	}

	__raw_writel(cfg, data->sfrbase + REG_MMU_CFG);
	data->version = ver;
}

static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data)
{
	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);
	clk_enable(data->clk);

	__raw_writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);

	__sysmmu_init_config(data);

	__sysmmu_set_ptbase(data->sfrbase, data->pgtable);

	__raw_writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);

	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);
}

static int __sysmmu_enable(struct sysmmu_drvdata *data,
			phys_addr_t pgtable, struct iommu_domain *domain)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (set_sysmmu_active(data)) {
		data->pgtable = pgtable;
		data->domain = domain;

		__sysmmu_enable_nocount(data);

		dev_dbg(data->sysmmu, "Enabled\n");
	} else {
		ret = (pgtable == data->pgtable) ? 1 : -EBUSY;

		dev_dbg(data->sysmmu, "already enabled\n");
	}

	if (WARN_ON(ret < 0))
		set_sysmmu_inactive(data); /* decrement count */

	spin_unlock_irqrestore(&data->lock, flags);

	return ret;
}

/* __exynos_sysmmu_enable: Enables System MMU
 *
 * returns -error if an error occurred and System MMU is not enabled,
 * 0 if the System MMU has been just enabled and 1 if System MMU was already
 * enabled before.
 */
static int __exynos_sysmmu_enable(struct device *dev, phys_addr_t pgtable,
				  struct iommu_domain *domain)
{
	int ret = 0;
	unsigned long flags;
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct sysmmu_drvdata *data;

	BUG_ON(!has_sysmmu(dev));

	spin_lock_irqsave(&owner->lock, flags);

	data = dev_get_drvdata(owner->sysmmu);

	ret = __sysmmu_enable(data, pgtable, domain);
	if (ret >= 0)
		data->master = dev;

	spin_unlock_irqrestore(&owner->lock, flags);

	return ret;
}

int exynos_sysmmu_enable(struct device *dev, phys_addr_t pgtable)
{
	BUG_ON(!memblock_is_memory(pgtable));

	return __exynos_sysmmu_enable(dev, pgtable, NULL);
}

static bool exynos_sysmmu_disable(struct device *dev)
{
	unsigned long flags;
	bool disabled = true;
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct sysmmu_drvdata *data;

	BUG_ON(!has_sysmmu(dev));

	spin_lock_irqsave(&owner->lock, flags);

	data = dev_get_drvdata(owner->sysmmu);

	disabled = __sysmmu_disable(data);
	if (disabled)
		data->master = NULL;

	spin_unlock_irqrestore(&owner->lock, flags);

	return disabled;
}

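/*
 * Invalidate a first-level page descriptor (FLPD) cache entry. The write is
 * issued only when the probed version is exactly 3.3, the revision whose
 * FLPD caching needs this workaround; it reuses REG_MMU_FLUSH_ENTRY and,
 * per the notes in alloc_lv2entry(), does not require blocking the System
 * MMU because the invalidated address is not currently mapped.
 */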
static void __sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
					      sysmmu_iova_t iova)
{
	if (data->version == MAKE_MMU_VER(3, 3))
		__raw_writel(iova | 0x1, data->sfrbase + REG_MMU_FLUSH_ENTRY);
}

static void sysmmu_tlb_invalidate_flpdcache(struct device *dev,
					    sysmmu_iova_t iova)
{
	unsigned long flags;
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct sysmmu_drvdata *data = dev_get_drvdata(owner->sysmmu);

	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);

	spin_lock_irqsave(&data->lock, flags);
	if (is_sysmmu_active(data))
		__sysmmu_tlb_invalidate_flpdcache(data, iova);
	spin_unlock_irqrestore(&data->lock, flags);

	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);
}

static void sysmmu_tlb_invalidate_entry(struct device *dev, sysmmu_iova_t iova,
					size_t size)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	unsigned long flags;
	struct sysmmu_drvdata *data;

	data = dev_get_drvdata(owner->sysmmu);

	spin_lock_irqsave(&data->lock, flags);
	if (is_sysmmu_active(data)) {
		unsigned int num_inv = 1;

		if (!IS_ERR(data->clk_master))
			clk_enable(data->clk_master);

		/*
		 * L2TLB invalidation required
		 * 4KB page: 1 invalidation
		 * 64KB page: 16 invalidations
		 * 1MB page: 64 invalidations
		 * because it is set-associative TLB
		 * with 8-way and 64 sets.
		 * 1MB page can be cached in one of all sets.
		 * 64KB page can be one of 16 consecutive sets.
		 */
		if (MMU_MAJ_VER(data->version) == 2)
			num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);

		if (sysmmu_block(data->sfrbase)) {
			__sysmmu_tlb_invalidate_entry(
				data->sfrbase, iova, num_inv);
			sysmmu_unblock(data->sfrbase);
		}
		if (!IS_ERR(data->clk_master))
			clk_disable(data->clk_master);
	} else {
		dev_dbg(dev, "disabled. Skipping TLB invalidation @ %#x\n",
			iova);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

void exynos_sysmmu_tlb_invalidate(struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	unsigned long flags;
	struct sysmmu_drvdata *data;

	data = dev_get_drvdata(owner->sysmmu);

	spin_lock_irqsave(&data->lock, flags);
	if (is_sysmmu_active(data)) {
		if (!IS_ERR(data->clk_master))
			clk_enable(data->clk_master);
		if (sysmmu_block(data->sfrbase)) {
			__sysmmu_tlb_invalidate(data->sfrbase);
			sysmmu_unblock(data->sfrbase);
		}
		if (!IS_ERR(data->clk_master))
			clk_disable(data->clk_master);
	} else {
		dev_dbg(dev, "disabled. Skipping TLB invalidation\n");
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

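/*
 * Probe one System MMU instance: map its SFR (special function register)
 * region, install the fault interrupt handler and look up its clocks. The
 * "sysmmu" clock gates the System MMU itself; the optional "master" clock
 * (presumably that of the master IP block) is additionally enabled around
 * register accesses throughout this driver whenever it is provided.
 */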
static int __init exynos_sysmmu_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct device *dev = &pdev->dev;
	struct sysmmu_drvdata *data;
	struct resource *res;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->sfrbase = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->sfrbase))
		return PTR_ERR(data->sfrbase);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(dev, "Unable to find IRQ resource\n");
		return irq;
	}

	ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
				dev_name(dev), data);
	if (ret) {
		dev_err(dev, "Unable to register handler of irq %d\n", irq);
		return ret;
	}

	data->clk = devm_clk_get(dev, "sysmmu");
	if (IS_ERR(data->clk)) {
		dev_err(dev, "Failed to get clock!\n");
		return PTR_ERR(data->clk);
	} else {
		ret = clk_prepare(data->clk);
		if (ret) {
			dev_err(dev, "Failed to prepare clk\n");
			return ret;
		}
	}

	data->clk_master = devm_clk_get(dev, "master");
	if (!IS_ERR(data->clk_master)) {
		ret = clk_prepare(data->clk_master);
		if (ret) {
			clk_unprepare(data->clk);
			dev_err(dev, "Failed to prepare master's clk\n");
			return ret;
		}
	}

	data->sysmmu = dev;
	spin_lock_init(&data->lock);

	platform_set_drvdata(pdev, data);

	pm_runtime_enable(dev);

	return 0;
}

static const struct of_device_id sysmmu_of_match[] __initconst = {
	{ .compatible	= "samsung,exynos-sysmmu", },
	{ },
};

static struct platform_driver exynos_sysmmu_driver __refdata = {
	.probe	= exynos_sysmmu_probe,
	.driver	= {
		.name		= "exynos-sysmmu",
		.of_match_table	= sysmmu_of_match,
	}
};

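/*
 * Page tables are read by the System MMU from physical memory, so any
 * CPU-side update must be cleaned out of the data caches (inner and outer)
 * before the hardware walks them.
 */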
static inline void pgtable_flush(void *vastart, void *vaend)
{
	dmac_flush_range(vastart, vaend);
	outer_flush_range(virt_to_phys(vastart),
				virt_to_phys(vaend));
}

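/*
 * Allocate an unmanaged IOMMU domain: an order-2 (16KiB) first-level page
 * table covering the full 32-bit aperture, plus an order-1 allocation of
 * per-section counters tracking free second-level entries. Every first-level
 * slot starts as ZERO_LV2LINK, a link to a shared all-fault second-level
 * table, as part of the System MMU v3.3 workaround noted below.
 */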
static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
{
	struct exynos_iommu_domain *exynos_domain;
	int i;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	exynos_domain = kzalloc(sizeof(*exynos_domain), GFP_KERNEL);
	if (!exynos_domain)
		return NULL;

	exynos_domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!exynos_domain->pgtable)
		goto err_pgtable;

	exynos_domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!exynos_domain->lv2entcnt)
		goto err_counter;

	/* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
	for (i = 0; i < NUM_LV1ENTRIES; i += 8) {
		exynos_domain->pgtable[i + 0] = ZERO_LV2LINK;
		exynos_domain->pgtable[i + 1] = ZERO_LV2LINK;
		exynos_domain->pgtable[i + 2] = ZERO_LV2LINK;
		exynos_domain->pgtable[i + 3] = ZERO_LV2LINK;
		exynos_domain->pgtable[i + 4] = ZERO_LV2LINK;
		exynos_domain->pgtable[i + 5] = ZERO_LV2LINK;
		exynos_domain->pgtable[i + 6] = ZERO_LV2LINK;
		exynos_domain->pgtable[i + 7] = ZERO_LV2LINK;
	}

	pgtable_flush(exynos_domain->pgtable, exynos_domain->pgtable + NUM_LV1ENTRIES);

	spin_lock_init(&exynos_domain->lock);
	spin_lock_init(&exynos_domain->pgtablelock);
	INIT_LIST_HEAD(&exynos_domain->clients);

	exynos_domain->domain.geometry.aperture_start = 0;
	exynos_domain->domain.geometry.aperture_end   = ~0UL;
	exynos_domain->domain.geometry.force_aperture = true;

	return &exynos_domain->domain;

err_counter:
	free_pages((unsigned long)exynos_domain->pgtable, 2);
err_pgtable:
	kfree(exynos_domain);
	return NULL;
}

static void exynos_iommu_domain_free(struct iommu_domain *domain)
{
	struct exynos_iommu_domain *priv = to_exynos_domain(domain);
	struct exynos_iommu_owner *owner;
	unsigned long flags;
	int i;

	WARN_ON(!list_empty(&priv->clients));

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry(owner, &priv->clients, client) {
		while (!exynos_sysmmu_disable(owner->dev))
			; /* until System MMU is actually disabled */
	}

	while (!list_empty(&priv->clients))
		list_del_init(priv->clients.next);

	spin_unlock_irqrestore(&priv->lock, flags);

	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (lv1ent_page(priv->pgtable + i))
			kmem_cache_free(lv2table_kmem_cache,
				phys_to_virt(lv2table_base(priv->pgtable + i)));

	free_pages((unsigned long)priv->pgtable, 2);
	free_pages((unsigned long)priv->lv2entcnt, 1);
	kfree(priv);
}

static int exynos_iommu_attach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct exynos_iommu_domain *priv = to_exynos_domain(domain);
	phys_addr_t pagetable = virt_to_phys(priv->pgtable);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->lock, flags);

	ret = __exynos_sysmmu_enable(dev, pagetable, domain);
	if (ret == 0) {
		list_add_tail(&owner->client, &priv->clients);
		owner->domain = domain;
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (ret < 0) {
		dev_err(dev, "%s: Failed to attach IOMMU with pgtable %pa\n",
			__func__, &pagetable);
		return ret;
	}

	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa %s\n",
		__func__, &pagetable, (ret == 0) ? "" : ", again");

	return ret;
}

static void exynos_iommu_detach_device(struct iommu_domain *domain,
				       struct device *dev)
{
	struct exynos_iommu_owner *owner;
	struct exynos_iommu_domain *priv = to_exynos_domain(domain);
	phys_addr_t pagetable = virt_to_phys(priv->pgtable);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry(owner, &priv->clients, client) {
		if (owner == dev->archdata.iommu) {
			if (exynos_sysmmu_disable(dev)) {
				list_del_init(&owner->client);
				owner->domain = NULL;
			}
			break;
		}
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (owner == dev->archdata.iommu)
		dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n",
					__func__, &pagetable);
	else
		dev_err(dev, "%s: No IOMMU is attached\n", __func__);
}

static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *priv,
		sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova);
		return ERR_PTR(-EADDRINUSE);
	}

	if (lv1ent_fault(sent)) {
		sysmmu_pte_t *pent;
		bool need_flush_flpd_cache = lv1ent_zero(sent);

		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
		BUG_ON((unsigned int)pent & (LV2TABLE_SIZE - 1));
		if (!pent)
			return ERR_PTR(-ENOMEM);

		*sent = mk_lv1ent_page(virt_to_phys(pent));
		kmemleak_ignore(pent);
		*pgcounter = NUM_LV2ENTRIES;
		pgtable_flush(pent, pent + NUM_LV2ENTRIES);
		pgtable_flush(sent, sent + 1);

		/*
		 * If pre-fetched SLPD is a faulty SLPD in zero_l2_table,
		 * FLPD cache may cache the address of zero_l2_table. This
		 * function replaces the zero_l2_table with new L2 page table
		 * to write valid mappings.
		 * Accessing the valid area may cause page fault since FLPD
		 * cache may still cache zero_l2_table for the valid area
		 * instead of new L2 page table that has the mapping
		 * information of the valid area.
		 * Thus any replacement of zero_l2_table with other valid L2
		 * page table must involve FLPD cache invalidation for System
		 * MMU v3.3.
		 * FLPD cache invalidation is performed with TLB invalidation
		 * by VPN without blocking. It is safe to invalidate TLB without
		 * blocking because the target address of TLB invalidation is
		 * not currently mapped.
		 */
		if (need_flush_flpd_cache) {
			struct exynos_iommu_owner *owner;

			spin_lock(&priv->lock);
			list_for_each_entry(owner, &priv->clients, client)
				sysmmu_tlb_invalidate_flpdcache(
							owner->dev, iova);
			spin_unlock(&priv->lock);
		}
	}

	return page_entry(sent, iova);
}

static int lv1set_section(struct exynos_iommu_domain *priv,
			  sysmmu_pte_t *sent, sysmmu_iova_t iova,
			  phys_addr_t paddr, short *pgcnt)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
			iova);
		return -EADDRINUSE;
	}

	if (lv1ent_page(sent)) {
		if (*pgcnt != NUM_LV2ENTRIES) {
			WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
				iova);
			return -EADDRINUSE;
		}

		kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
		*pgcnt = 0;
	}

	*sent = mk_lv1ent_sect(paddr);

	pgtable_flush(sent, sent + 1);

	spin_lock(&priv->lock);
	if (lv1ent_page_zero(sent)) {
		struct exynos_iommu_owner *owner;
		/*
		 * Flushing FLPD cache in System MMU v3.3 that may cache a FLPD
		 * entry by speculative prefetch of SLPD which has no mapping.
		 */
		list_for_each_entry(owner, &priv->clients, client)
			sysmmu_tlb_invalidate_flpdcache(owner->dev, iova);
	}
	spin_unlock(&priv->lock);

	return 0;
}

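/*
 * Fill second-level entries for one mapping: a 4KiB small page takes one
 * entry, a 64KiB large page is written as 16 consecutive identical entries.
 * The per-section free-entry counter is decremented accordingly, and a
 * partially written large page is rolled back if an entry is already in use.
 */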
static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
								short *pgcnt)
{
	if (size == SPAGE_SIZE) {
		if (WARN_ON(!lv2ent_fault(pent)))
			return -EADDRINUSE;

		*pent = mk_lv2ent_spage(paddr);
		pgtable_flush(pent, pent + 1);
		*pgcnt -= 1;
	} else { /* size == LPAGE_SIZE */
		int i;

		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
			if (WARN_ON(!lv2ent_fault(pent))) {
				if (i > 0)
					memset(pent - i, 0, sizeof(*pent) * i);
				return -EADDRINUSE;
			}

			*pent = mk_lv2ent_lpage(paddr);
		}
		pgtable_flush(pent - SPAGES_PER_LPAGE, pent);
		*pgcnt -= SPAGES_PER_LPAGE;
	}

	return 0;
}

/*
 * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
 *
 * System MMU v3.x has advanced logic to improve address translation
 * performance with caching more page table entries by a page table walk.
 * However, the logic has a bug that while caching faulty page table entries,
 * System MMU reports page fault if the cached fault entry is hit even though
 * the fault entry is updated to a valid entry after the entry is cached.
 * To prevent caching faulty page table entries which may be updated to valid
 * entries later, the virtual memory manager should care about the workaround
 * for the problem. The following describes the workaround.
 *
 * Any two consecutive I/O virtual address regions must have a hole of 128KiB
 * at maximum to prevent misbehavior of System MMU 3.x (workaround for h/w bug).
 *
 * Precisely, any start address of I/O virtual region must be aligned with
 * the following sizes for System MMU v3.1 and v3.2.
 * System MMU v3.1: 128KiB
 * System MMU v3.2: 256KiB
 *
 * Because System MMU v3.3 caches page table entries more aggressively, it needs
 * more workarounds.
 * - Any two consecutive I/O virtual regions must have a hole of size larger
 *   than or equal to 128KiB.
 * - Start address of an I/O virtual region must be aligned by 128KiB.
 */
static int exynos_iommu_map(struct iommu_domain *domain, unsigned long l_iova,
			    phys_addr_t paddr, size_t size, int prot)
{
	struct exynos_iommu_domain *priv = to_exynos_domain(domain);
	sysmmu_pte_t *entry;
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	unsigned long flags;
	int ret = -ENOMEM;

	BUG_ON(priv->pgtable == NULL);

	spin_lock_irqsave(&priv->pgtablelock, flags);

	entry = section_entry(priv->pgtable, iova);

	if (size == SECT_SIZE) {
		ret = lv1set_section(priv, entry, iova, paddr,
					&priv->lv2entcnt[lv1ent_offset(iova)]);
	} else {
		sysmmu_pte_t *pent;

		pent = alloc_lv2entry(priv, entry, iova,
					&priv->lv2entcnt[lv1ent_offset(iova)]);

		if (IS_ERR(pent))
			ret = PTR_ERR(pent);
		else
			ret = lv2set_page(pent, paddr, size,
					&priv->lv2entcnt[lv1ent_offset(iova)]);
	}

	if (ret)
		pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
			__func__, ret, size, iova);

	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	return ret;
}

static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *priv,
						sysmmu_iova_t iova, size_t size)
{
	struct exynos_iommu_owner *owner;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry(owner, &priv->clients, client)
		sysmmu_tlb_invalidate_entry(owner->dev, iova, size);

	spin_unlock_irqrestore(&priv->lock, flags);
}

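/*
 * Unmap whatever is currently mapped at iova and report the size that was
 * actually removed. A 1MiB section entry is replaced with ZERO_LV2LINK
 * rather than a plain fault entry (System MMU v3.3 workaround), a large page
 * clears all 16 of its second-level entries, and the per-section free-entry
 * counter is replenished. The TLBs of every attached master are invalidated
 * at the end, outside the page table lock.
 */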
static size_t exynos_iommu_unmap(struct iommu_domain *domain,
					unsigned long l_iova, size_t size)
{
	struct exynos_iommu_domain *priv = to_exynos_domain(domain);
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	sysmmu_pte_t *ent;
	size_t err_pgsize;
	unsigned long flags;

	BUG_ON(priv->pgtable == NULL);

	spin_lock_irqsave(&priv->pgtablelock, flags);

	ent = section_entry(priv->pgtable, iova);

	if (lv1ent_section(ent)) {
		if (WARN_ON(size < SECT_SIZE)) {
			err_pgsize = SECT_SIZE;
			goto err;
		}

		/* workaround for h/w bug in System MMU v3.3 */
		*ent = ZERO_LV2LINK;
		pgtable_flush(ent, ent + 1);
		size = SECT_SIZE;
		goto done;
	}

	if (unlikely(lv1ent_fault(ent))) {
		if (size > SECT_SIZE)
			size = SECT_SIZE;
		goto done;
	}

	/* lv1ent_page(ent) == true here */

	ent = page_entry(ent, iova);

	if (unlikely(lv2ent_fault(ent))) {
		size = SPAGE_SIZE;
		goto done;
	}

	if (lv2ent_small(ent)) {
		*ent = 0;
		size = SPAGE_SIZE;
		pgtable_flush(ent, ent + 1);
		priv->lv2entcnt[lv1ent_offset(iova)] += 1;
		goto done;
	}

	/* lv2ent_large(ent) == true here */
	if (WARN_ON(size < LPAGE_SIZE)) {
		err_pgsize = LPAGE_SIZE;
		goto err;
	}

	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
	pgtable_flush(ent, ent + SPAGES_PER_LPAGE);

	size = LPAGE_SIZE;
	priv->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	exynos_iommu_tlb_invalidate_entry(priv, iova, size);

	return size;
err:
	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
		__func__, size, iova, err_pgsize);

	return 0;
}

static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
					     dma_addr_t iova)
{
	struct exynos_iommu_domain *priv = to_exynos_domain(domain);
	sysmmu_pte_t *entry;
	unsigned long flags;
	phys_addr_t phys = 0;

	spin_lock_irqsave(&priv->pgtablelock, flags);

	entry = section_entry(priv->pgtable, iova);

	if (lv1ent_section(entry)) {
		phys = section_phys(entry) + section_offs(iova);
	} else if (lv1ent_page(entry)) {
		entry = page_entry(entry, iova);

		if (lv2ent_large(entry))
			phys = lpage_phys(entry) + lpage_offs(iova);
		else if (lv2ent_small(entry))
			phys = spage_phys(entry) + spage_offs(iova);
	}

	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	return phys;
}

static int exynos_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);

	if (!group) {
		group = iommu_group_alloc();
		if (IS_ERR(group)) {
			dev_err(dev, "Failed to allocate IOMMU group\n");
			return PTR_ERR(group);
		}
	}

	ret = iommu_group_add_device(group, dev);
	iommu_group_put(group);

	return ret;
}

static void exynos_iommu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}

static const struct iommu_ops exynos_iommu_ops = {
	.domain_alloc = exynos_iommu_domain_alloc,
	.domain_free = exynos_iommu_domain_free,
	.attach_dev = exynos_iommu_attach_device,
	.detach_dev = exynos_iommu_detach_device,
	.map = exynos_iommu_map,
	.unmap = exynos_iommu_unmap,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = exynos_iommu_iova_to_phys,
	.add_device = exynos_iommu_add_device,
	.remove_device = exynos_iommu_remove_device,
	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
};

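/*
 * Module initialization: bail out silently when no "samsung,exynos-sysmmu"
 * node exists, then create the kmem cache used for second-level tables
 * (sized and aligned to LV2TABLE_SIZE, matching the alignment check in
 * alloc_lv2entry()), register the platform driver, allocate the shared
 * all-fault zero_lv2_table and finally hook the driver into the platform
 * bus via bus_set_iommu().
 */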
static int __init exynos_iommu_init(void)
{
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, sysmmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
				LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
	if (!lv2table_kmem_cache) {
		pr_err("%s: Failed to create kmem cache\n", __func__);
		return -ENOMEM;
	}

	ret = platform_driver_register(&exynos_sysmmu_driver);
	if (ret) {
		pr_err("%s: Failed to register driver\n", __func__);
		goto err_reg_driver;
	}

	zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
	if (zero_lv2_table == NULL) {
		pr_err("%s: Failed to allocate zero level2 page table\n",
			__func__);
		ret = -ENOMEM;
		goto err_zero_lv2;
	}

	ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
	if (ret) {
		pr_err("%s: Failed to register exynos-iommu driver.\n",
								__func__);
		goto err_set_iommu;
	}

	return 0;
err_set_iommu:
	kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
err_zero_lv2:
	platform_driver_unregister(&exynos_sysmmu_driver);
err_reg_driver:
	kmem_cache_destroy(lv2table_kmem_cache);
	return ret;
}
subsys_initcall(exynos_iommu_init);