/* linux/drivers/iommu/exynos_iommu.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/iommu.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/export.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>

/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

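/*
 * The entry type is encoded in the two least-significant bits of each
 * descriptor: an Lv1 entry is an Lv2 page-table pointer (1), a 1MB section
 * (2) or a fault (0 or 3); an Lv2 entry is a 64KB large page (1), a 4KB
 * small page (bit 1 set) or a fault (0).
 */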
#define lv1ent_fault(sent) (((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_page(sent) ((*(sent) & 3) == 1)
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)

#define section_phys(sent) (*(sent) & SECT_MASK)
#define section_offs(iova) ((iova) & 0xFFFFF)
#define lpage_phys(pent) (*(pent) & LPAGE_MASK)
#define lpage_offs(iova) ((iova) & 0xFFFF)
#define spage_phys(pent) (*(pent) & SPAGE_MASK)
#define spage_offs(iova) ((iova) & 0xFFF)

#define lv1ent_offset(iova) ((iova) >> SECT_ORDER)
#define lv2ent_offset(iova) (((iova) & 0xFF000) >> SPAGE_ORDER)

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES 256

#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(long))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)

#define lv2table_base(sent) (*(sent) & 0xFFFFFC00)

#define mk_lv1ent_sect(pa) ((pa) | 2)
#define mk_lv1ent_page(pa) ((pa) | 1)
#define mk_lv2ent_lpage(pa) ((pa) | 1)
#define mk_lv2ent_spage(pa) ((pa) | 2)

#define CTRL_ENABLE 0x5
#define CTRL_BLOCK 0x7
#define CTRL_DISABLE 0x0

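/* Offsets of the System MMU control registers from the SFR base (sfrbase) */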
#define REG_MMU_CTRL 0x000
#define REG_MMU_CFG 0x004
#define REG_MMU_STATUS 0x008
#define REG_MMU_FLUSH 0x00C
#define REG_MMU_FLUSH_ENTRY 0x010
#define REG_PT_BASE_ADDR 0x014
#define REG_INT_STATUS 0x018
#define REG_INT_CLEAR 0x01C

#define REG_PAGE_FAULT_ADDR 0x024
#define REG_AW_FAULT_ADDR 0x028
#define REG_AR_FAULT_ADDR 0x02C
#define REG_DEFAULT_SLAVE_ADDR 0x030

#define REG_MMU_VERSION 0x034

#define REG_PB0_SADDR 0x04C
#define REG_PB0_EADDR 0x050
#define REG_PB1_SADDR 0x054
#define REG_PB1_EADDR 0x058

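/*
 * Slab cache for Lv2 page tables: each table is 1KB (256 entries * 4 bytes)
 * and is aligned to its own size so that its base address fits into the
 * Lv1 descriptor (see lv2table_base()).
 */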
static struct kmem_cache *lv2table_kmem_cache;

static unsigned long *section_entry(unsigned long *pgtable, unsigned long iova)
{
	return pgtable + lv1ent_offset(iova);
}

static unsigned long *page_entry(unsigned long *sent, unsigned long iova)
{
	return (unsigned long *)phys_to_virt(
				lv2table_base(sent)) + lv2ent_offset(iova);
}

enum exynos_sysmmu_inttype {
	SYSMMU_PAGEFAULT,
	SYSMMU_AR_MULTIHIT,
	SYSMMU_AW_MULTIHIT,
	SYSMMU_BUSERROR,
	SYSMMU_AR_SECURITY,
	SYSMMU_AR_ACCESS,
	SYSMMU_AW_SECURITY,
	SYSMMU_AW_PROTECTION, /* 7 */
	SYSMMU_FAULT_UNKNOWN,
	SYSMMU_FAULTS_NUM
};

static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
	REG_PAGE_FAULT_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AW_FAULT_ADDR,
	REG_DEFAULT_SLAVE_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AW_FAULT_ADDR,
	REG_AW_FAULT_ADDR
};

static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
	"PAGE FAULT",
	"AR MULTI-HIT FAULT",
	"AW MULTI-HIT FAULT",
	"BUS ERROR",
	"AR SECURITY PROTECTION FAULT",
	"AR ACCESS PROTECTION FAULT",
	"AW SECURITY PROTECTION FAULT",
	"AW ACCESS PROTECTION FAULT",
	"UNKNOWN FAULT"
};

struct exynos_iommu_domain {
	struct list_head clients; /* list of sysmmu_drvdata.node */
	unsigned long *pgtable; /* lv1 page table, 16KB */
	short *lv2entcnt; /* free lv2 entry counter for each section */
	spinlock_t lock; /* lock for this structure */
	spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
};

struct sysmmu_drvdata {
	struct list_head node; /* entry of exynos_iommu_domain.clients */
	struct device *sysmmu;	/* System MMU's device descriptor */
	struct device *dev;	/* Owner of system MMU */
	void __iomem *sfrbase;
	struct clk *clk;
	struct clk *clk_master;
	int activations;
	spinlock_t lock;
	struct iommu_domain *domain;
	phys_addr_t pgtable;
};

static bool set_sysmmu_active(struct sysmmu_drvdata *data)
{
	/* return true if the System MMU was not active previously
	   and it needs to be initialized */
	return ++data->activations == 1;
}

static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
{
	/* return true if the System MMU needs to be disabled */
	BUG_ON(data->activations < 1);
	return --data->activations == 0;
}

static bool is_sysmmu_active(struct sysmmu_drvdata *data)
{
	return data->activations > 0;
}

static void sysmmu_unblock(void __iomem *sfrbase)
{
	__raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
}

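/*
 * Stall the System MMU before touching the TLB: write CTRL_BLOCK and poll
 * the status register, with a bounded retry count, until the hardware
 * reports that it is blocked. Returns false (after re-enabling translation)
 * if the MMU did not block in time.
 */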
static bool sysmmu_block(void __iomem *sfrbase)
{
	int i = 120;

	__raw_writel(CTRL_BLOCK, sfrbase + REG_MMU_CTRL);
	while ((i > 0) && !(__raw_readl(sfrbase + REG_MMU_STATUS) & 1))
		--i;

	if (!(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) {
		sysmmu_unblock(sfrbase);
		return false;
	}

	return true;
}

static void __sysmmu_tlb_invalidate(void __iomem *sfrbase)
{
	__raw_writel(0x1, sfrbase + REG_MMU_FLUSH);
}

static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase,
				unsigned long iova, unsigned int num_inv)
{
	unsigned int i;
	for (i = 0; i < num_inv; i++) {
		__raw_writel((iova & SPAGE_MASK) | 1,
				sfrbase + REG_MMU_FLUSH_ENTRY);
		iova += SPAGE_SIZE;
	}
}

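/*
 * Program the physical address of the Lv1 page table and flush the whole
 * TLB so that no translation cached from a previous table survives.
 */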
static void __sysmmu_set_ptbase(void __iomem *sfrbase,
				       unsigned long pgd)
{
	__raw_writel(0x1, sfrbase + REG_MMU_CFG); /* 16KB LV1, LRU */
	__raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR);

	__sysmmu_tlb_invalidate(sfrbase);
}

static void show_fault_information(const char *name,
		enum exynos_sysmmu_inttype itype,
		phys_addr_t pgtable_base, unsigned long fault_addr)
{
	unsigned long *ent;

	if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
		itype = SYSMMU_FAULT_UNKNOWN;

	pr_err("%s occurred at %#lx by %s(Page table base: %pa)\n",
		sysmmu_fault_name[itype], fault_addr, name, &pgtable_base);

	ent = section_entry(phys_to_virt(pgtable_base), fault_addr);
	pr_err("\tLv1 entry: 0x%lx\n", *ent);

	if (lv1ent_page(ent)) {
		ent = page_entry(ent, fault_addr);
		pr_err("\t Lv2 entry: 0x%lx\n", *ent);
	}
}

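/*
 * Fault interrupt handler: reads the interrupt status to classify the
 * fault, dumps the faulting page table walk and gives the owning domain a
 * chance to recover through report_iommu_fault(). An unrecovered fault is
 * treated as fatal (BUG_ON below).
 */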
static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
	/* The System MMU is blocked while this interrupt is serviced. */
	struct sysmmu_drvdata *data = dev_id;
	enum exynos_sysmmu_inttype itype;
	unsigned long addr = -1;
	int ret = -ENOSYS;

	WARN_ON(!is_sysmmu_active(data));

	spin_lock(&data->lock);

	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);

	itype = (enum exynos_sysmmu_inttype)
		__ffs(__raw_readl(data->sfrbase + REG_INT_STATUS));
	if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
		itype = SYSMMU_FAULT_UNKNOWN;
	else
		addr = __raw_readl(data->sfrbase + fault_reg_offset[itype]);

	if (itype == SYSMMU_FAULT_UNKNOWN) {
		pr_err("%s: The fault was not caused by System MMU '%s'!\n",
			__func__, dev_name(data->sysmmu));
		pr_err("%s: Please check if the IRQ is correctly configured.\n",
			__func__);
		BUG();
	} else {
		unsigned long base =
				__raw_readl(data->sfrbase + REG_PT_BASE_ADDR);
		show_fault_information(dev_name(data->sysmmu),
					itype, base, addr);
		if (data->domain)
			ret = report_iommu_fault(data->domain,
					data->dev, addr, itype);
	}

	/* the fault was not recovered by the fault handler */
	BUG_ON(ret != 0);

	__raw_writel(1 << itype, data->sfrbase + REG_INT_CLEAR);

	sysmmu_unblock(data->sfrbase);

	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);

	spin_unlock(&data->lock);

	return IRQ_HANDLED;
}

static bool __exynos_sysmmu_disable(struct sysmmu_drvdata *data)
{
	unsigned long flags;
	bool disabled = false;

	spin_lock_irqsave(&data->lock, flags);

	if (!set_sysmmu_inactive(data))
		goto finish;

	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);

	__raw_writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);

	clk_disable(data->clk);
	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);

	disabled = true;
	data->pgtable = 0;
	data->domain = NULL;
finish:
	spin_unlock_irqrestore(&data->lock, flags);

	if (disabled)
		dev_dbg(data->sysmmu, "Disabled\n");
	else
		dev_dbg(data->sysmmu, "%d times left to be disabled\n",
					data->activations);

	return disabled;
}

/* __exynos_sysmmu_enable: Enables System MMU
 *
 * Returns a negative error code if an error occurred and the System MMU is
 * not enabled, 0 if the System MMU has just been enabled, or 1 if the
 * System MMU was already enabled before the call.
 */
static int __exynos_sysmmu_enable(struct sysmmu_drvdata *data,
			unsigned long pgtable, struct iommu_domain *domain)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);

	if (!set_sysmmu_active(data)) {
		if (WARN_ON(pgtable != data->pgtable)) {
			ret = -EBUSY;
			set_sysmmu_inactive(data);
		} else {
			ret = 1;
		}

		dev_dbg(data->sysmmu, "Already enabled\n");
		goto finish;
	}

	data->pgtable = pgtable;

	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);
	clk_enable(data->clk);

	__sysmmu_set_ptbase(data->sfrbase, pgtable);

	__raw_writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);

	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);

	data->domain = domain;

	dev_dbg(data->sysmmu, "Enabled\n");
finish:
	spin_unlock_irqrestore(&data->lock, flags);

	return ret;
}

int exynos_sysmmu_enable(struct device *dev, unsigned long pgtable)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
	int ret;

	BUG_ON(!memblock_is_memory(pgtable));

	ret = pm_runtime_get_sync(data->sysmmu);
	if (ret < 0) {
		dev_dbg(data->sysmmu, "Failed to enable\n");
		return ret;
	}

	ret = __exynos_sysmmu_enable(data, pgtable, NULL);
	if (WARN_ON(ret < 0)) {
		pm_runtime_put(data->sysmmu);
		dev_err(data->sysmmu, "Already enabled with page table %#x\n",
			data->pgtable);
	} else {
		data->dev = dev;
	}

	return ret;
}

static bool exynos_sysmmu_disable(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
	bool disabled;

	disabled = __exynos_sysmmu_disable(data);
	pm_runtime_put(data->sysmmu);

	return disabled;
}

static void sysmmu_tlb_invalidate_entry(struct device *dev, unsigned long iova,
					size_t size)
{
	unsigned long flags;
	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);

	spin_lock_irqsave(&data->lock, flags);

	if (is_sysmmu_active(data)) {
		unsigned int maj;
		unsigned int num_inv = 1;

		if (!IS_ERR(data->clk_master))
			clk_enable(data->clk_master);

		maj = __raw_readl(data->sfrbase + REG_MMU_VERSION);
		/*
		 * L2TLB invalidation required:
		 *   4KB page: 1 invalidation
		 *  64KB page: 16 invalidations
		 *   1MB page: 64 invalidations
		 * because the TLB is 8-way set-associative with 64 sets.
		 * A 1MB page can be cached in any of the sets and a 64KB
		 * page can be in one of 16 consecutive sets.
		 */
		if ((maj >> 28) == 2) /* major version number */
			num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);

		if (sysmmu_block(data->sfrbase)) {
			__sysmmu_tlb_invalidate_entry(
				data->sfrbase, iova, num_inv);
			sysmmu_unblock(data->sfrbase);
		}
		if (!IS_ERR(data->clk_master))
			clk_disable(data->clk_master);
	} else {
		dev_dbg(data->sysmmu, "Disabled. Skipping invalidating TLB.\n");
	}

	spin_unlock_irqrestore(&data->lock, flags);
}

void exynos_sysmmu_tlb_invalidate(struct device *dev)
{
	unsigned long flags;
	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);

	spin_lock_irqsave(&data->lock, flags);

	if (is_sysmmu_active(data)) {
		if (!IS_ERR(data->clk_master))
			clk_enable(data->clk_master);
		if (sysmmu_block(data->sfrbase)) {
			__sysmmu_tlb_invalidate(data->sfrbase);
			sysmmu_unblock(data->sfrbase);
		}
		if (!IS_ERR(data->clk_master))
			clk_disable(data->clk_master);
	} else {
		dev_dbg(data->sysmmu, "Disabled. Skipping invalidating TLB.\n");
	}

	spin_unlock_irqrestore(&data->lock, flags);
}

static int exynos_sysmmu_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct device *dev = &pdev->dev;
	struct sysmmu_drvdata *data;
	struct resource *res;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->sfrbase = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->sfrbase))
		return PTR_ERR(data->sfrbase);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_dbg(dev, "Unable to find IRQ resource\n");
		return irq;
	}

	ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
				dev_name(dev), data);
	if (ret) {
		dev_err(dev, "Unable to register handler of irq %d\n", irq);
		return ret;
	}

	data->clk = devm_clk_get(dev, "sysmmu");
	if (IS_ERR(data->clk)) {
		dev_err(dev, "Failed to get clock!\n");
		return PTR_ERR(data->clk);
	} else {
		ret = clk_prepare(data->clk);
		if (ret) {
			dev_err(dev, "Failed to prepare clk\n");
			return ret;
		}
	}

	data->clk_master = devm_clk_get(dev, "master");
	if (!IS_ERR(data->clk_master)) {
		ret = clk_prepare(data->clk_master);
		if (ret) {
			clk_unprepare(data->clk);
			dev_err(dev, "Failed to prepare master's clk\n");
			return ret;
		}
	}

	data->sysmmu = dev;
	spin_lock_init(&data->lock);
	INIT_LIST_HEAD(&data->node);

	platform_set_drvdata(pdev, data);

	pm_runtime_enable(dev);

	return 0;
}

static struct platform_driver exynos_sysmmu_driver = {
	.probe	= exynos_sysmmu_probe,
	.driver	= {
		.owner		= THIS_MODULE,
		.name		= "exynos-sysmmu",
	}
};

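/*
 * The System MMU walks page tables in DRAM without snooping the CPU
 * caches, so every page table update must be cleaned from both the inner
 * and outer caches before the hardware can observe it.
 */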
static inline void pgtable_flush(void *vastart, void *vaend)
{
	dmac_flush_range(vastart, vaend);
	outer_flush_range(virt_to_phys(vastart),
				virt_to_phys(vaend));
}

static int exynos_iommu_domain_init(struct iommu_domain *domain)
{
	struct exynos_iommu_domain *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

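	/*
	 * The Lv1 table holds 4096 4-byte entries = 16KB, hence the order-2
	 * allocation (assuming 4KB pages); the lv2entcnt counters need
	 * 4096 * sizeof(short) = 8KB, hence order 1.
	 */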
	priv->pgtable = (unsigned long *)__get_free_pages(
						GFP_KERNEL | __GFP_ZERO, 2);
	if (!priv->pgtable)
		goto err_pgtable;

	priv->lv2entcnt = (short *)__get_free_pages(
						GFP_KERNEL | __GFP_ZERO, 1);
	if (!priv->lv2entcnt)
		goto err_counter;

	pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);

	spin_lock_init(&priv->lock);
	spin_lock_init(&priv->pgtablelock);
	INIT_LIST_HEAD(&priv->clients);

	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end   = ~0UL;
	domain->geometry.force_aperture = true;

	domain->priv = priv;
	return 0;

err_counter:
	free_pages((unsigned long)priv->pgtable, 2);
err_pgtable:
	kfree(priv);
	return -ENOMEM;
}

static void exynos_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct exynos_iommu_domain *priv = domain->priv;
	struct sysmmu_drvdata *data;
	unsigned long flags;
	int i;

	WARN_ON(!list_empty(&priv->clients));

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry(data, &priv->clients, node) {
		while (!exynos_sysmmu_disable(data->dev))
			; /* until System MMU is actually disabled */
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (lv1ent_page(priv->pgtable + i))
			kmem_cache_free(lv2table_kmem_cache,
				phys_to_virt(lv2table_base(priv->pgtable + i)));

	free_pages((unsigned long)priv->pgtable, 2);
	free_pages((unsigned long)priv->lv2entcnt, 1);
	kfree(domain->priv);
	domain->priv = NULL;
}

static int exynos_iommu_attach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
	struct exynos_iommu_domain *priv = domain->priv;
	phys_addr_t pagetable = virt_to_phys(priv->pgtable);
	unsigned long flags;
	int ret;

	ret = pm_runtime_get_sync(data->sysmmu);
	if (ret < 0)
		return ret;

	ret = 0;

	spin_lock_irqsave(&priv->lock, flags);

	ret = __exynos_sysmmu_enable(data, pagetable, domain);

	if (ret == 0) {
		/* 'data->node' must not already be in priv->clients */
		BUG_ON(!list_empty(&data->node));
		data->dev = dev;
		list_add_tail(&data->node, &priv->clients);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (ret < 0) {
		dev_err(dev, "%s: Failed to attach IOMMU with pgtable %pa\n",
					__func__, &pagetable);
		pm_runtime_put(data->sysmmu);
		return ret;
	}

	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa %s\n",
		__func__, &pagetable, (ret == 0) ? "" : ", again");

	return ret;
}

static void exynos_iommu_detach_device(struct iommu_domain *domain,
				    struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
	struct exynos_iommu_domain *priv = domain->priv;
	struct list_head *pos;
	phys_addr_t pagetable = virt_to_phys(priv->pgtable);
	unsigned long flags;
	bool found = false;

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each(pos, &priv->clients) {
		if (list_entry(pos, struct sysmmu_drvdata, node) == data) {
			found = true;
			break;
		}
	}

	if (!found)
		goto finish;

	if (__exynos_sysmmu_disable(data)) {
		dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n",
					__func__, &pagetable);
		list_del_init(&data->node);

	} else {
		dev_dbg(dev, "%s: Detaching IOMMU with pgtable %pa delayed",
					__func__, &pagetable);
	}

finish:
	spin_unlock_irqrestore(&priv->lock, flags);

	if (found)
		pm_runtime_put(data->sysmmu);
}

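/*
 * Demand-allocate an Lv2 page table for the section covering @iova and
 * hook it into the Lv1 entry. GFP_ATOMIC is required here because the
 * caller holds the domain's pgtablelock spinlock.
 */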
static unsigned long *alloc_lv2entry(unsigned long *sent, unsigned long iova,
					short *pgcounter)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on %#08lx mapped with 1MiB page", iova);
		return ERR_PTR(-EADDRINUSE);
	}

	if (lv1ent_fault(sent)) {
		unsigned long *pent;

		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
		BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1));
		if (!pent)
			return ERR_PTR(-ENOMEM);

		*sent = mk_lv1ent_page(virt_to_phys(pent));
		*pgcounter = NUM_LV2ENTRIES;
		pgtable_flush(pent, pent + NUM_LV2ENTRIES);
		pgtable_flush(sent, sent + 1);
	}

	return page_entry(sent, iova);
}

static int lv1set_section(unsigned long *sent, unsigned long iova,
			  phys_addr_t paddr, short *pgcnt)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on 1MiB@%#08lx that is mapped",
			iova);
		return -EADDRINUSE;
	}

	if (lv1ent_page(sent)) {
		if (*pgcnt != NUM_LV2ENTRIES) {
			WARN(1, "Trying mapping on 1MiB@%#08lx that is mapped",
				iova);
			return -EADDRINUSE;
		}

		kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
		*pgcnt = 0;
	}

	*sent = mk_lv1ent_sect(paddr);

	pgtable_flush(sent, sent + 1);

	return 0;
}

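/*
 * Install a 4KB small-page or 64KB large-page mapping in an Lv2 table.
 * A large page occupies SPAGES_PER_LPAGE (16) consecutive entries, all
 * carrying the same base address; on a conflict halfway through, the
 * partially written entries are rolled back.
 */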
static int lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size,
								short *pgcnt)
{
	if (size == SPAGE_SIZE) {
		if (!lv2ent_fault(pent)) {
			WARN(1, "Trying mapping on 4KiB where mapping exists");
			return -EADDRINUSE;
		}

		*pent = mk_lv2ent_spage(paddr);
		pgtable_flush(pent, pent + 1);
		*pgcnt -= 1;
	} else { /* size == LPAGE_SIZE */
		int i;
		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
			if (!lv2ent_fault(pent)) {
				WARN(1,
				"Trying mapping on 64KiB where mapping exists");
				if (i > 0)
					memset(pent - i, 0, sizeof(*pent) * i);
				return -EADDRINUSE;
			}

			*pent = mk_lv2ent_lpage(paddr);
		}
		pgtable_flush(pent - SPAGES_PER_LPAGE, pent);
		*pgcnt -= SPAGES_PER_LPAGE;
	}

	return 0;
}

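/*
 * The IOMMU core splits map requests according to pgsize_bitmap, so @size
 * here is always exactly SECT_SIZE (1MB), LPAGE_SIZE (64KB) or SPAGE_SIZE
 * (4KB); the size alone selects the page table level to use.
 */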
static int exynos_iommu_map(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t size, int prot)
{
	struct exynos_iommu_domain *priv = domain->priv;
	unsigned long *entry;
	unsigned long flags;
	int ret = -ENOMEM;

	BUG_ON(priv->pgtable == NULL);

	spin_lock_irqsave(&priv->pgtablelock, flags);

	entry = section_entry(priv->pgtable, iova);

	if (size == SECT_SIZE) {
		ret = lv1set_section(entry, iova, paddr,
					&priv->lv2entcnt[lv1ent_offset(iova)]);
	} else {
		unsigned long *pent;

		pent = alloc_lv2entry(entry, iova,
					&priv->lv2entcnt[lv1ent_offset(iova)]);

		if (IS_ERR(pent))
			ret = PTR_ERR(pent);
		else
			ret = lv2set_page(pent, paddr, size,
					&priv->lv2entcnt[lv1ent_offset(iova)]);
	}

	if (ret)
		pr_debug("%s: Failed to map iova 0x%lx/0x%x bytes\n",
			__func__, iova, size);

	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	return ret;
}

static size_t exynos_iommu_unmap(struct iommu_domain *domain,
					unsigned long iova, size_t size)
{
	struct exynos_iommu_domain *priv = domain->priv;
	struct sysmmu_drvdata *data;
	unsigned long flags;
	unsigned long *ent;
	size_t err_pgsize;

	BUG_ON(priv->pgtable == NULL);

	spin_lock_irqsave(&priv->pgtablelock, flags);

	ent = section_entry(priv->pgtable, iova);

	if (lv1ent_section(ent)) {
		if (size < SECT_SIZE) {
			err_pgsize = SECT_SIZE;
			goto err;
		}

		*ent = 0;
		pgtable_flush(ent, ent + 1);
		size = SECT_SIZE;
		goto done;
	}

	if (unlikely(lv1ent_fault(ent))) {
		if (size > SECT_SIZE)
			size = SECT_SIZE;
		goto done;
	}

	/* lv1ent_page(ent) == true here */

	ent = page_entry(ent, iova);

	if (unlikely(lv2ent_fault(ent))) {
		size = SPAGE_SIZE;
		goto done;
	}

	if (lv2ent_small(ent)) {
		*ent = 0;
		size = SPAGE_SIZE;
		pgtable_flush(ent, ent + 1);
		priv->lv2entcnt[lv1ent_offset(iova)] += 1;
		goto done;
	}

	/* lv2ent_large(ent) == true here */
	if (size < LPAGE_SIZE) {
		err_pgsize = LPAGE_SIZE;
		goto err;
	}

	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
	pgtable_flush(ent, ent + SPAGES_PER_LPAGE);

	size = LPAGE_SIZE;
	priv->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	spin_lock_irqsave(&priv->lock, flags);
	list_for_each_entry(data, &priv->clients, node)
		sysmmu_tlb_invalidate_entry(data->dev, iova, size);
	spin_unlock_irqrestore(&priv->lock, flags);

	return size;
err:
	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	WARN(1,
	"%s: Failed: size(%#x) @ %#08lx is smaller than page size %#x\n",
		__func__, size, iova, err_pgsize);

	return 0;
}

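/*
 * Translate an I/O virtual address to a physical address by walking the
 * two-level page table in software, under pgtablelock. Returns 0 if no
 * mapping exists for @iova.
 */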
static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
					  dma_addr_t iova)
{
	struct exynos_iommu_domain *priv = domain->priv;
	unsigned long *entry;
	unsigned long flags;
	phys_addr_t phys = 0;

	spin_lock_irqsave(&priv->pgtablelock, flags);

	entry = section_entry(priv->pgtable, iova);

	if (lv1ent_section(entry)) {
		phys = section_phys(entry) + section_offs(iova);
	} else if (lv1ent_page(entry)) {
		entry = page_entry(entry, iova);

		if (lv2ent_large(entry))
			phys = lpage_phys(entry) + lpage_offs(iova);
		else if (lv2ent_small(entry))
			phys = spage_phys(entry) + spage_offs(iova);
	}

	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	return phys;
}

static struct iommu_ops exynos_iommu_ops = {
	.domain_init = &exynos_iommu_domain_init,
	.domain_destroy = &exynos_iommu_domain_destroy,
	.attach_dev = &exynos_iommu_attach_device,
	.detach_dev = &exynos_iommu_detach_device,
	.map = &exynos_iommu_map,
	.unmap = &exynos_iommu_unmap,
	.iova_to_phys = &exynos_iommu_iova_to_phys,
	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
};

static int __init exynos_iommu_init(void)
{
	int ret;

	lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
		LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
	if (!lv2table_kmem_cache) {
		pr_err("%s: Failed to create kmem cache\n", __func__);
		return -ENOMEM;
	}

	ret = platform_driver_register(&exynos_sysmmu_driver);
	if (ret) {
		pr_err("%s: Failed to register driver\n", __func__);
		goto err_reg_driver;
	}

	ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
	if (ret) {
		pr_err("%s: Failed to register exynos-iommu driver.\n",
								__func__);
		goto err_set_iommu;
	}

	return 0;
err_set_iommu:
	platform_driver_unregister(&exynos_sysmmu_driver);
err_reg_driver:
	kmem_cache_destroy(lv2table_kmem_cache);
	return ret;
}
subsys_initcall(exynos_iommu_init);