/* linux/drivers/iommu/exynos_iommu.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/interrupt.h>
#include <linux/kmemleak.h>	/* added for kmemleak_ignore() in alloc_lv2entry() */
#include <linux/list.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/dma-iommu.h>

typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;

/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))
45
Cho KyongHo66a7ed82014-05-12 11:45:04 +053046#define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
47 ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
48#define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK)
49#define lv1ent_page_zero(sent) ((*(sent) & 3) == 1)
50#define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
51 ((*(sent) & 3) == 1))
KyongHo Cho2a965362012-05-12 05:56:09 +090052#define lv1ent_section(sent) ((*(sent) & 3) == 2)
53
54#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
55#define lv2ent_small(pent) ((*(pent) & 2) == 2)
56#define lv2ent_large(pent) ((*(pent) & 3) == 1)
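
/*
 * Editor's note on the entry encodings tested above: bits [1:0] of each
 * entry select its type. A first-level entry is a fault (0 or 3), a link
 * to a second-level table (1) or a 1MiB section mapping (2); a
 * second-level entry is a fault (0), a 64KiB large page (1) or a 4KiB
 * small page (2).
 */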

static u32 sysmmu_page_offset(sysmmu_iova_t iova, u32 size)
{
	return iova & (size - 1);
}

#define section_phys(sent) (*(sent) & SECT_MASK)
#define section_offs(iova) sysmmu_page_offset((iova), SECT_SIZE)
#define lpage_phys(pent) (*(pent) & LPAGE_MASK)
#define lpage_offs(iova) sysmmu_page_offset((iova), LPAGE_SIZE)
#define spage_phys(pent) (*(pent) & SPAGE_MASK)
#define spage_offs(iova) sysmmu_page_offset((iova), SPAGE_SIZE)

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)

static u32 lv1ent_offset(sysmmu_iova_t iova)
{
	return iova >> SECT_ORDER;
}

static u32 lv2ent_offset(sysmmu_iova_t iova)
{
	return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
}
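
/*
 * Editor's illustration: for iova 0x12345678, lv1ent_offset() yields
 * 0x123 (iova >> 20) and lv2ent_offset() yields 0x45
 * ((iova >> 12) & 0xFF), so a table walk reads first-level entry 0x123
 * and, when that entry links to a second-level table, entry 0x45 of that
 * table; the low 12 bits (0x678) are the in-page offset.
 */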

#define LV1TABLE_SIZE (NUM_LV1ENTRIES * sizeof(sysmmu_pte_t))
#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)

#define lv2table_base(sent) (*(sent) & 0xFFFFFC00)

#define mk_lv1ent_sect(pa) ((pa) | 2)
#define mk_lv1ent_page(pa) ((pa) | 1)
#define mk_lv2ent_lpage(pa) ((pa) | 1)
#define mk_lv2ent_spage(pa) ((pa) | 2)

#define CTRL_ENABLE 0x5
#define CTRL_BLOCK 0x7
#define CTRL_DISABLE 0x0

#define CFG_LRU 0x1
#define CFG_QOS(n) ((n & 0xF) << 7)
#define CFG_MASK 0x0150FFFF /* Selecting bit 0-15, 20, 22 and 24 */
#define CFG_ACGEN (1 << 24) /* System MMU 3.3 only */
#define CFG_SYSSEL (1 << 22) /* System MMU 3.2 only */
#define CFG_FLPDCACHE (1 << 20) /* System MMU 3.2+ only */

#define REG_MMU_CTRL 0x000
#define REG_MMU_CFG 0x004
#define REG_MMU_STATUS 0x008
#define REG_MMU_FLUSH 0x00C
#define REG_MMU_FLUSH_ENTRY 0x010
#define REG_PT_BASE_ADDR 0x014
#define REG_INT_STATUS 0x018
#define REG_INT_CLEAR 0x01C

#define REG_PAGE_FAULT_ADDR 0x024
#define REG_AW_FAULT_ADDR 0x028
#define REG_AR_FAULT_ADDR 0x02C
#define REG_DEFAULT_SLAVE_ADDR 0x030

#define REG_MMU_VERSION 0x034

#define MMU_MAJ_VER(val) ((val) >> 7)
#define MMU_MIN_VER(val) ((val) & 0x7F)
#define MMU_RAW_VER(reg) (((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */

#define MAKE_MMU_VER(maj, min) ((((maj) & 0xF) << 7) | ((min) & 0x7F))

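/*
 * Editor's note: MAKE_MMU_VER(3, 3) == (3 << 7) | 3 == 0x183, the value
 * data->version holds for a System MMU v3.3 instance; MMU_RAW_VER()
 * extracts the same "major.minor" field from bits [31:21] of
 * REG_MMU_VERSION.
 */
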
#define REG_PB0_SADDR 0x04C
#define REG_PB0_EADDR 0x050
#define REG_PB1_SADDR 0x054
#define REG_PB1_EADDR 0x058

#define has_sysmmu(dev) (dev->archdata.iommu != NULL)

static struct device *dma_dev;
static struct kmem_cache *lv2table_kmem_cache;
static sysmmu_pte_t *zero_lv2_table;
#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))

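/*
 * Editor's note: zero_lv2_table is a single, shared second-level table
 * holding only fault entries. Unused first-level slots point at it via
 * ZERO_LV2LINK instead of being plain fault entries, as part of the
 * System MMU v3.3 FLPD cache workaround described in alloc_lv2entry()
 * and exynos_iommu_domain_alloc() below.
 */
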
static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
	return pgtable + lv1ent_offset(iova);
}

static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
	return (sysmmu_pte_t *)phys_to_virt(
				lv2table_base(sent)) + lv2ent_offset(iova);
}

enum exynos_sysmmu_inttype {
	SYSMMU_PAGEFAULT,
	SYSMMU_AR_MULTIHIT,
	SYSMMU_AW_MULTIHIT,
	SYSMMU_BUSERROR,
	SYSMMU_AR_SECURITY,
	SYSMMU_AR_ACCESS,
	SYSMMU_AW_SECURITY,
	SYSMMU_AW_PROTECTION, /* 7 */
	SYSMMU_FAULT_UNKNOWN,
	SYSMMU_FAULTS_NUM
};

static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
	REG_PAGE_FAULT_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AW_FAULT_ADDR,
	REG_DEFAULT_SLAVE_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AW_FAULT_ADDR,
	REG_AW_FAULT_ADDR
};

static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
	"PAGE FAULT",
	"AR MULTI-HIT FAULT",
	"AW MULTI-HIT FAULT",
	"BUS ERROR",
	"AR SECURITY PROTECTION FAULT",
	"AR ACCESS PROTECTION FAULT",
	"AW SECURITY PROTECTION FAULT",
	"AW ACCESS PROTECTION FAULT",
	"UNKNOWN FAULT"
};

/*
 * This structure is attached to dev->archdata.iommu of the master device
 * on device add. It contains a list of the SYSMMU controllers defined by
 * the device tree that are bound to the given master device. It is usually
 * referenced by the 'owner' pointer.
 */
struct exynos_iommu_owner {
	struct list_head controllers;	/* list of sysmmu_drvdata.owner_node */
};

/*
 * This structure is an Exynos-specific generalization of struct iommu_domain.
 * It contains a list of the SYSMMU controllers from all master devices that
 * have been attached to this domain, plus the page tables of the I/O address
 * space it defines. It is usually referenced by the 'domain' pointer.
 */
struct exynos_iommu_domain {
	struct list_head clients;	/* list of sysmmu_drvdata.domain_node */
	sysmmu_pte_t *pgtable;		/* lv1 page table, 16KB */
	short *lv2entcnt;	/* free lv2 entry counter for each section */
	spinlock_t lock;	/* lock for modifying list of clients */
	spinlock_t pgtablelock;	/* lock for modifying page table @ pgtable */
	struct iommu_domain domain; /* generic domain data structure */
};

/*
 * This structure holds all data of a single SYSMMU controller: hw resources
 * like registers and clocks, pointers and list nodes connecting it to all
 * other structures, internal state and parameters read from the device
 * tree. It is usually referenced by the 'data' pointer.
 */
struct sysmmu_drvdata {
	struct device *sysmmu;		/* SYSMMU controller device */
	struct device *master;		/* master device (owner) */
	void __iomem *sfrbase;		/* our registers */
	struct clk *clk;		/* SYSMMU's clock */
	struct clk *clk_master;		/* master's device clock */
	int activations;		/* number of calls to sysmmu_enable */
	spinlock_t lock;		/* lock for modifying state */
	struct exynos_iommu_domain *domain; /* domain we belong to */
	struct list_head domain_node;	/* node for domain clients list */
	struct list_head owner_node;	/* node for owner controllers list */
	phys_addr_t pgtable;		/* assigned page table structure */
	unsigned int version;		/* our version */
};

static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct exynos_iommu_domain, domain);
}

static bool set_sysmmu_active(struct sysmmu_drvdata *data)
{
	/* return true if the System MMU was not active previously
	   and it needs to be initialized */
	return ++data->activations == 1;
}

static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
{
	/* return true if the System MMU needs to be disabled */
	BUG_ON(data->activations < 1);
	return --data->activations == 0;
}

static bool is_sysmmu_active(struct sysmmu_drvdata *data)
{
	return data->activations > 0;
}

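/*
 * Editor's note: 'activations' is a plain reference count guarded by
 * data->lock; the hardware is programmed only on the 0 -> 1 transition
 * and shut down only on the 1 -> 0 transition, while nested calls just
 * adjust the counter.
 */
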
static void sysmmu_unblock(void __iomem *sfrbase)
{
	__raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
}

static bool sysmmu_block(void __iomem *sfrbase)
{
	int i = 120;

	__raw_writel(CTRL_BLOCK, sfrbase + REG_MMU_CTRL);
	while ((i > 0) && !(__raw_readl(sfrbase + REG_MMU_STATUS) & 1))
		--i;

	if (!(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) {
		sysmmu_unblock(sfrbase);
		return false;
	}

	return true;
}

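/*
 * Editor's note: sysmmu_block() asks the MMU to stop accepting new
 * translations (CTRL_BLOCK) and polls bit 0 of REG_MMU_STATUS up to 120
 * times; on timeout it re-enables the MMU via sysmmu_unblock() and
 * returns false, so callers skip maintenance that requires a blocked MMU.
 */
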
static void __sysmmu_tlb_invalidate(void __iomem *sfrbase)
{
	__raw_writel(0x1, sfrbase + REG_MMU_FLUSH);
}

static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase,
				sysmmu_iova_t iova, unsigned int num_inv)
{
	unsigned int i;

	for (i = 0; i < num_inv; i++) {
		__raw_writel((iova & SPAGE_MASK) | 1,
				sfrbase + REG_MMU_FLUSH_ENTRY);
		iova += SPAGE_SIZE;
	}
}

static void __sysmmu_set_ptbase(void __iomem *sfrbase,
				phys_addr_t pgd)
{
	__raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR);

	__sysmmu_tlb_invalidate(sfrbase);
}

static void show_fault_information(const char *name,
		enum exynos_sysmmu_inttype itype,
		phys_addr_t pgtable_base, sysmmu_iova_t fault_addr)
{
	sysmmu_pte_t *ent;

	if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
		itype = SYSMMU_FAULT_UNKNOWN;

	pr_err("%s occurred at %#x by %s(Page table base: %pa)\n",
		sysmmu_fault_name[itype], fault_addr, name, &pgtable_base);

	ent = section_entry(phys_to_virt(pgtable_base), fault_addr);
	pr_err("\tLv1 entry: %#x\n", *ent);

	if (lv1ent_page(ent)) {
		ent = page_entry(ent, fault_addr);
		pr_err("\t Lv2 entry: %#x\n", *ent);
	}
}

static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
	/* SYSMMU is in blocked state when an interrupt occurs. */
	struct sysmmu_drvdata *data = dev_id;
	enum exynos_sysmmu_inttype itype;
	sysmmu_iova_t addr = -1;
	int ret = -ENOSYS;

	WARN_ON(!is_sysmmu_active(data));

	spin_lock(&data->lock);

	clk_enable(data->clk_master);

	itype = (enum exynos_sysmmu_inttype)
		__ffs(__raw_readl(data->sfrbase + REG_INT_STATUS));
	if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
		itype = SYSMMU_FAULT_UNKNOWN;
	else
		addr = __raw_readl(data->sfrbase + fault_reg_offset[itype]);

	if (itype == SYSMMU_FAULT_UNKNOWN) {
		pr_err("%s: Fault was not raised by System MMU '%s'!\n",
			__func__, dev_name(data->sysmmu));
		pr_err("%s: Please check if IRQ is correctly configured.\n",
			__func__);
		BUG();
	} else {
		unsigned int base =
			__raw_readl(data->sfrbase + REG_PT_BASE_ADDR);
		show_fault_information(dev_name(data->sysmmu),
					itype, base, addr);
		if (data->domain)
			ret = report_iommu_fault(&data->domain->domain,
					data->master, addr, itype);
	}

	/* fault is not recovered by fault handler */
	BUG_ON(ret != 0);

	__raw_writel(1 << itype, data->sfrbase + REG_INT_CLEAR);

	sysmmu_unblock(data->sfrbase);

	clk_disable(data->clk_master);

	spin_unlock(&data->lock);

	return IRQ_HANDLED;
}

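/*
 * Editor's note: the handler above decodes the lowest pending bit of
 * REG_INT_STATUS into an exynos_sysmmu_inttype, reads the matching fault
 * address register and gives report_iommu_fault() a chance to resolve
 * the fault; since nothing here repairs the faulting mapping itself, a
 * non-zero result is fatal (BUG_ON) before the interrupt is cleared and
 * the MMU unblocked.
 */
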
static void __sysmmu_disable_nocount(struct sysmmu_drvdata *data)
{
	clk_enable(data->clk_master);

	__raw_writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
	__raw_writel(0, data->sfrbase + REG_MMU_CFG);

	clk_disable(data->clk);
	clk_disable(data->clk_master);
}

static bool __sysmmu_disable(struct sysmmu_drvdata *data)
{
	bool disabled;
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);

	disabled = set_sysmmu_inactive(data);

	if (disabled) {
		data->pgtable = 0;
		data->domain = NULL;

		__sysmmu_disable_nocount(data);

		dev_dbg(data->sysmmu, "Disabled\n");
	} else {
		dev_dbg(data->sysmmu, "%d times left to disable\n",
					data->activations);
	}

	spin_unlock_irqrestore(&data->lock, flags);

	return disabled;
}

static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
	unsigned int cfg = CFG_LRU | CFG_QOS(15);
	unsigned int ver;

	ver = MMU_RAW_VER(__raw_readl(data->sfrbase + REG_MMU_VERSION));
	if (MMU_MAJ_VER(ver) == 3) {
		if (MMU_MIN_VER(ver) >= 2) {
			cfg |= CFG_FLPDCACHE;
			if (MMU_MIN_VER(ver) == 3) {
				cfg |= CFG_ACGEN;
				cfg &= ~CFG_LRU;
			} else {
				cfg |= CFG_SYSSEL;
			}
		}
	}

	__raw_writel(cfg, data->sfrbase + REG_MMU_CFG);
	data->version = ver;
}

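/*
 * Editor's note on the version-dependent CFG bits above: every SYSMMU
 * starts from LRU replacement and QoS 15; v3.2 and later also enable the
 * first-level page descriptor (FLPD) cache, v3.2 additionally sets
 * CFG_SYSSEL, while v3.3 sets CFG_ACGEN and drops CFG_LRU instead.
 */
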
static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data)
{
	clk_enable(data->clk_master);
	clk_enable(data->clk);

	__raw_writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);

	__sysmmu_init_config(data);

	__sysmmu_set_ptbase(data->sfrbase, data->pgtable);

	__raw_writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);

	clk_disable(data->clk_master);
}

static int __sysmmu_enable(struct sysmmu_drvdata *data, phys_addr_t pgtable,
			   struct exynos_iommu_domain *domain)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (set_sysmmu_active(data)) {
		data->pgtable = pgtable;
		data->domain = domain;

		__sysmmu_enable_nocount(data);

		dev_dbg(data->sysmmu, "Enabled\n");
	} else {
		ret = (pgtable == data->pgtable) ? 1 : -EBUSY;

		dev_dbg(data->sysmmu, "already enabled\n");
	}

	if (WARN_ON(ret < 0))
		set_sysmmu_inactive(data); /* decrement count */

	spin_unlock_irqrestore(&data->lock, flags);

	return ret;
}

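/*
 * Editor's note: __sysmmu_enable() returns 0 on the first activation, 1
 * when the MMU was already enabled with the same page table (the call is
 * only counted), and -EBUSY when a different page table is already
 * installed.
 */
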
static void __sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
					      sysmmu_iova_t iova)
{
	if (data->version == MAKE_MMU_VER(3, 3))
		__raw_writel(iova | 0x1, data->sfrbase + REG_MMU_FLUSH_ENTRY);
}

static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
					    sysmmu_iova_t iova)
{
	unsigned long flags;

	clk_enable(data->clk_master);

	spin_lock_irqsave(&data->lock, flags);
	if (is_sysmmu_active(data))
		__sysmmu_tlb_invalidate_flpdcache(data, iova);
	spin_unlock_irqrestore(&data->lock, flags);

	clk_disable(data->clk_master);
}

static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
					sysmmu_iova_t iova, size_t size)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (is_sysmmu_active(data)) {
		unsigned int num_inv = 1;

		clk_enable(data->clk_master);

		/*
		 * L2TLB invalidation required
		 * 4KB page: 1 invalidation
		 * 64KB page: 16 invalidations
		 * 1MB page: 64 invalidations
		 * because it is set-associative TLB
		 * with 8-way and 64 sets.
		 * 1MB page can be cached in one of all sets.
		 * 64KB page can be one of 16 consecutive sets.
		 */
		if (MMU_MAJ_VER(data->version) == 2)
			num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);

		if (sysmmu_block(data->sfrbase)) {
			__sysmmu_tlb_invalidate_entry(
				data->sfrbase, iova, num_inv);
			sysmmu_unblock(data->sfrbase);
		}
		clk_disable(data->clk_master);
	} else {
		dev_dbg(data->master,
			"disabled. Skipping TLB invalidation @ %#x\n", iova);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

static int __init exynos_sysmmu_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct device *dev = &pdev->dev;
	struct sysmmu_drvdata *data;
	struct resource *res;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->sfrbase = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->sfrbase))
		return PTR_ERR(data->sfrbase);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(dev, "Unable to find IRQ resource\n");
		return irq;
	}

	ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
				dev_name(dev), data);
	if (ret) {
		dev_err(dev, "Unable to register handler of irq %d\n", irq);
		return ret;
	}

	data->clk = devm_clk_get(dev, "sysmmu");
	if (IS_ERR(data->clk)) {
		dev_err(dev, "Failed to get clock!\n");
		return PTR_ERR(data->clk);
	} else {
		ret = clk_prepare(data->clk);
		if (ret) {
			dev_err(dev, "Failed to prepare clk\n");
			return ret;
		}
	}

	data->clk_master = devm_clk_get(dev, "master");
	if (!IS_ERR(data->clk_master)) {
		ret = clk_prepare(data->clk_master);
		if (ret) {
			clk_unprepare(data->clk);
			dev_err(dev, "Failed to prepare master's clk\n");
			return ret;
		}
	} else {
		data->clk_master = NULL;
	}

	data->sysmmu = dev;
	spin_lock_init(&data->lock);

	platform_set_drvdata(pdev, data);

	pm_runtime_enable(dev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int exynos_sysmmu_suspend(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);

	dev_dbg(dev, "suspend\n");
	if (is_sysmmu_active(data)) {
		__sysmmu_disable_nocount(data);
		pm_runtime_put(dev);
	}
	return 0;
}

static int exynos_sysmmu_resume(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);

	dev_dbg(dev, "resume\n");
	if (is_sysmmu_active(data)) {
		pm_runtime_get_sync(dev);
		__sysmmu_enable_nocount(data);
	}
	return 0;
}
#endif

static const struct dev_pm_ops sysmmu_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume)
};

static const struct of_device_id sysmmu_of_match[] __initconst = {
	{ .compatible	= "samsung,exynos-sysmmu", },
	{ },
};

static struct platform_driver exynos_sysmmu_driver __refdata = {
	.probe	= exynos_sysmmu_probe,
	.driver	= {
		.name		= "exynos-sysmmu",
		.of_match_table	= sysmmu_of_match,
		.pm		= &sysmmu_pm_ops,
	}
};

static inline void update_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
{
	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent),
				DMA_TO_DEVICE);
	*ent = val;
	dma_sync_single_for_device(dma_dev, virt_to_phys(ent), sizeof(*ent),
				   DMA_TO_DEVICE);
}

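/*
 * Editor's note: page tables live in cacheable kernel memory while the
 * SYSMMU fetches them by physical address, so update_pte() brackets each
 * store with dma_sync_single_for_cpu()/_for_device() on dma_dev to push
 * the new entry out to memory. dma_dev is the first registered SYSMMU
 * (see exynos_iommu_of_setup()) and DMA addresses are assumed to equal
 * physical addresses here.
 */
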
static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
{
	struct exynos_iommu_domain *domain;
	dma_addr_t handle;
	int i;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA) {
		if (iommu_get_dma_cookie(&domain->domain) != 0)
			goto err_pgtable;
	} else if (type != IOMMU_DOMAIN_UNMANAGED) {
		goto err_pgtable;
	}

	domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!domain->pgtable)
		goto err_dma_cookie;

	domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!domain->lv2entcnt)
		goto err_counter;

	/* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
	for (i = 0; i < NUM_LV1ENTRIES; i += 8) {
		domain->pgtable[i + 0] = ZERO_LV2LINK;
		domain->pgtable[i + 1] = ZERO_LV2LINK;
		domain->pgtable[i + 2] = ZERO_LV2LINK;
		domain->pgtable[i + 3] = ZERO_LV2LINK;
		domain->pgtable[i + 4] = ZERO_LV2LINK;
		domain->pgtable[i + 5] = ZERO_LV2LINK;
		domain->pgtable[i + 6] = ZERO_LV2LINK;
		domain->pgtable[i + 7] = ZERO_LV2LINK;
	}

	handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE,
				DMA_TO_DEVICE);
	/* For mapping page table entries we rely on dma == phys */
	BUG_ON(handle != virt_to_phys(domain->pgtable));

	spin_lock_init(&domain->lock);
	spin_lock_init(&domain->pgtablelock);
	INIT_LIST_HEAD(&domain->clients);

	domain->domain.geometry.aperture_start = 0;
	domain->domain.geometry.aperture_end   = ~0UL;
	domain->domain.geometry.force_aperture = true;

	return &domain->domain;

err_counter:
	free_pages((unsigned long)domain->pgtable, 2);
err_dma_cookie:
	if (type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(&domain->domain);
err_pgtable:
	kfree(domain);
	return NULL;
}

static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;
	int i;

	WARN_ON(!list_empty(&domain->clients));

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		if (__sysmmu_disable(data))
			data->master = NULL;
		list_del_init(&data->domain_node);
	}

	spin_unlock_irqrestore(&domain->lock, flags);

	if (iommu_domain->type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(iommu_domain);

	dma_unmap_single(dma_dev, virt_to_phys(domain->pgtable), LV1TABLE_SIZE,
			 DMA_TO_DEVICE);

	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (lv1ent_page(domain->pgtable + i)) {
			phys_addr_t base = lv2table_base(domain->pgtable + i);

			dma_unmap_single(dma_dev, base, LV2TABLE_SIZE,
					 DMA_TO_DEVICE);
			kmem_cache_free(lv2table_kmem_cache,
					phys_to_virt(base));
		}

	free_pages((unsigned long)domain->pgtable, 2);
	free_pages((unsigned long)domain->lv2entcnt, 1);
	kfree(domain);
}

static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
				      struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct sysmmu_drvdata *data;
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	unsigned long flags;
	int ret = -ENODEV;

	if (!has_sysmmu(dev))
		return -ENODEV;

	list_for_each_entry(data, &owner->controllers, owner_node) {
		pm_runtime_get_sync(data->sysmmu);
		ret = __sysmmu_enable(data, pagetable, domain);
		if (ret >= 0) {
			data->master = dev;

			spin_lock_irqsave(&domain->lock, flags);
			list_add_tail(&data->domain_node, &domain->clients);
			spin_unlock_irqrestore(&domain->lock, flags);
		}
	}

	if (ret < 0) {
		dev_err(dev, "%s: Failed to attach IOMMU with pgtable %pa\n",
			__func__, &pagetable);
		return ret;
	}

	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa %s\n",
		__func__, &pagetable, (ret == 0) ? "" : ", again");

	return ret;
}

static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
				       struct device *dev)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;
	bool found = false;

	if (!has_sysmmu(dev))
		return;

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		if (data->master == dev) {
			if (__sysmmu_disable(data)) {
				data->master = NULL;
				list_del_init(&data->domain_node);
			}
			pm_runtime_put(data->sysmmu);
			found = true;
		}
	}
	spin_unlock_irqrestore(&domain->lock, flags);

	if (found)
		dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n",
					__func__, &pagetable);
	else
		dev_err(dev, "%s: No IOMMU is attached\n", __func__);
}

static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
		sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying to map on %#08x, already mapped with a 1MiB page",
			iova);
		return ERR_PTR(-EADDRINUSE);
	}

	if (lv1ent_fault(sent)) {
		sysmmu_pte_t *pent;
		bool need_flush_flpd_cache = lv1ent_zero(sent);

		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
		BUG_ON((unsigned int)pent & (LV2TABLE_SIZE - 1));
		if (!pent)
			return ERR_PTR(-ENOMEM);

		update_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
		kmemleak_ignore(pent);
		*pgcounter = NUM_LV2ENTRIES;
		dma_map_single(dma_dev, pent, LV2TABLE_SIZE, DMA_TO_DEVICE);

		/*
		 * If a pre-fetched SLPD is a faulty SLPD in zero_l2_table,
		 * the FLPD cache may cache the address of zero_l2_table.
		 * This function replaces zero_l2_table with a new L2 page
		 * table to write valid mappings.
		 * Accessing the valid area may cause a page fault since the
		 * FLPD cache may still cache zero_l2_table for the valid
		 * area instead of the new L2 page table that holds the
		 * mapping information of the valid area.
		 * Thus any replacement of zero_l2_table with another valid
		 * L2 page table must involve FLPD cache invalidation for
		 * System MMU v3.3.
		 * FLPD cache invalidation is performed with TLB invalidation
		 * by VPN without blocking. It is safe to invalidate the TLB
		 * without blocking because the target address of the TLB
		 * invalidation is not currently mapped.
		 */
		if (need_flush_flpd_cache) {
			struct sysmmu_drvdata *data;

			spin_lock(&domain->lock);
			list_for_each_entry(data, &domain->clients, domain_node)
				sysmmu_tlb_invalidate_flpdcache(data, iova);
			spin_unlock(&domain->lock);
		}
	}

	return page_entry(sent, iova);
}

static int lv1set_section(struct exynos_iommu_domain *domain,
			  sysmmu_pte_t *sent, sysmmu_iova_t iova,
			  phys_addr_t paddr, short *pgcnt)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying to map 1MiB@%#08x, which is already mapped",
			iova);
		return -EADDRINUSE;
	}

	if (lv1ent_page(sent)) {
		if (*pgcnt != NUM_LV2ENTRIES) {
			WARN(1, "Trying to map 1MiB@%#08x, which is already mapped",
				iova);
			return -EADDRINUSE;
		}

		kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
		*pgcnt = 0;
	}

	update_pte(sent, mk_lv1ent_sect(paddr));

	spin_lock(&domain->lock);
	if (lv1ent_page_zero(sent)) {
		struct sysmmu_drvdata *data;
		/*
		 * Flushing FLPD cache in System MMU v3.3 that may cache a
		 * FLPD entry by speculative prefetch of an SLPD which has
		 * no mapping.
		 */
		list_for_each_entry(data, &domain->clients, domain_node)
			sysmmu_tlb_invalidate_flpdcache(data, iova);
	}
	spin_unlock(&domain->lock);

	return 0;
}

static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
		       short *pgcnt)
{
	if (size == SPAGE_SIZE) {
		if (WARN_ON(!lv2ent_fault(pent)))
			return -EADDRINUSE;

		update_pte(pent, mk_lv2ent_spage(paddr));
		*pgcnt -= 1;
	} else { /* size == LPAGE_SIZE */
		int i;
		dma_addr_t pent_base = virt_to_phys(pent);

		dma_sync_single_for_cpu(dma_dev, pent_base,
					sizeof(*pent) * SPAGES_PER_LPAGE,
					DMA_TO_DEVICE);
		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
			if (WARN_ON(!lv2ent_fault(pent))) {
				if (i > 0)
					memset(pent - i, 0, sizeof(*pent) * i);
				return -EADDRINUSE;
			}

			*pent = mk_lv2ent_lpage(paddr);
		}
		dma_sync_single_for_device(dma_dev, pent_base,
					   sizeof(*pent) * SPAGES_PER_LPAGE,
					   DMA_TO_DEVICE);
		*pgcnt -= SPAGES_PER_LPAGE;
	}

	return 0;
}

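/*
 * Editor's note: lv2entcnt[] counts how many of the NUM_LV2ENTRIES (256)
 * slots of each second-level table are still unmapped; mapping subtracts
 * 1 per small page or SPAGES_PER_LPAGE (16) per large page, and
 * exynos_iommu_unmap() adds the same amounts back.
 */
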
/*
 * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
 *
 * System MMU v3.x has advanced logic to improve address translation
 * performance by caching more page table entries during a page table walk.
 * However, the logic has a bug: while faulty page table entries are cached,
 * System MMU reports a page fault if such a cached fault entry is hit, even
 * though the entry has been updated to a valid one after it was cached.
 * To prevent caching faulty page table entries which may be updated to valid
 * entries later, the virtual memory manager should care about the workaround
 * for the problem. The following describes the workaround.
 *
 * Any two consecutive I/O virtual address regions must have a hole of at
 * least 128KiB to prevent misbehavior of System MMU 3.x (workaround for
 * h/w bug).
 *
 * Precisely, any start address of an I/O virtual region must be aligned with
 * the following sizes for System MMU v3.1 and v3.2.
 * System MMU v3.1: 128KiB
 * System MMU v3.2: 256KiB
 *
 * Because System MMU v3.3 caches page table entries more aggressively, it
 * needs more workarounds.
 * - Any two consecutive I/O virtual regions must have a hole of size larger
 *   than or equal to 128KiB.
 * - Start address of an I/O virtual region must be aligned by 128KiB.
 */
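
/*
 * Editor's illustration of the v3.3 rules above: region start addresses
 * must be multiples of 128KiB (0x20000), e.g. 0x10000000 or 0x10020000,
 * and two consecutive regions must be separated by a hole of at least
 * 128KiB, so a region may neither begin at 0x10010000 nor directly abut
 * the previous region.
 */
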
static int exynos_iommu_map(struct iommu_domain *iommu_domain,
			    unsigned long l_iova, phys_addr_t paddr, size_t size,
			    int prot)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	unsigned long flags;
	int ret = -ENOMEM;

	BUG_ON(domain->pgtable == NULL);

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (size == SECT_SIZE) {
		ret = lv1set_section(domain, entry, iova, paddr,
				     &domain->lv2entcnt[lv1ent_offset(iova)]);
	} else {
		sysmmu_pte_t *pent;

		pent = alloc_lv2entry(domain, entry, iova,
				      &domain->lv2entcnt[lv1ent_offset(iova)]);

		if (IS_ERR(pent))
			ret = PTR_ERR(pent);
		else
			ret = lv2set_page(pent, paddr, size,
					  &domain->lv2entcnt[lv1ent_offset(iova)]);
	}

	if (ret)
		pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
			__func__, ret, size, iova);

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return ret;
}

static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain,
					      sysmmu_iova_t iova, size_t size)
{
	struct sysmmu_drvdata *data;
	unsigned long flags;

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry(data, &domain->clients, domain_node)
		sysmmu_tlb_invalidate_entry(data, iova, size);

	spin_unlock_irqrestore(&domain->lock, flags);
}

static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
				 unsigned long l_iova, size_t size)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	sysmmu_pte_t *ent;
	size_t err_pgsize;
	unsigned long flags;

	BUG_ON(domain->pgtable == NULL);

	spin_lock_irqsave(&domain->pgtablelock, flags);

	ent = section_entry(domain->pgtable, iova);

	if (lv1ent_section(ent)) {
		if (WARN_ON(size < SECT_SIZE)) {
			err_pgsize = SECT_SIZE;
			goto err;
		}

		/* workaround for h/w bug in System MMU v3.3 */
		update_pte(ent, ZERO_LV2LINK);
		size = SECT_SIZE;
		goto done;
	}

	if (unlikely(lv1ent_fault(ent))) {
		if (size > SECT_SIZE)
			size = SECT_SIZE;
		goto done;
	}

	/* lv1ent_page(ent) == true here */

	ent = page_entry(ent, iova);

	if (unlikely(lv2ent_fault(ent))) {
		size = SPAGE_SIZE;
		goto done;
	}

	if (lv2ent_small(ent)) {
		update_pte(ent, 0);
		size = SPAGE_SIZE;
		domain->lv2entcnt[lv1ent_offset(iova)] += 1;
		goto done;
	}

	/* lv2ent_large(ent) == true here */
	if (WARN_ON(size < LPAGE_SIZE)) {
		err_pgsize = LPAGE_SIZE;
		goto err;
	}

	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent),
				sizeof(*ent) * SPAGES_PER_LPAGE,
				DMA_TO_DEVICE);
	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
	dma_sync_single_for_device(dma_dev, virt_to_phys(ent),
				   sizeof(*ent) * SPAGES_PER_LPAGE,
				   DMA_TO_DEVICE);
	size = LPAGE_SIZE;
	domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	exynos_iommu_tlb_invalidate_entry(domain, iova, size);

	return size;
err:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
		__func__, size, iova, err_pgsize);

	return 0;
}

static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
					     dma_addr_t iova)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	unsigned long flags;
	phys_addr_t phys = 0;

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (lv1ent_section(entry)) {
		phys = section_phys(entry) + section_offs(iova);
	} else if (lv1ent_page(entry)) {
		entry = page_entry(entry, iova);

		if (lv2ent_large(entry))
			phys = lpage_phys(entry) + lpage_offs(iova);
		else if (lv2ent_small(entry))
			phys = spage_phys(entry) + spage_offs(iova);
	}

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return phys;
}

static struct iommu_group *get_device_iommu_group(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		group = iommu_group_alloc();

	return group;
}

static int exynos_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;

	if (!has_sysmmu(dev))
		return -ENODEV;

	group = iommu_group_get_for_dev(dev);

	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);

	return 0;
}

static void exynos_iommu_remove_device(struct device *dev)
{
	if (!has_sysmmu(dev))
		return;

	iommu_group_remove_device(dev);
}

static int exynos_iommu_of_xlate(struct device *dev,
				 struct of_phandle_args *spec)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct platform_device *sysmmu = of_find_device_by_node(spec->np);
	struct sysmmu_drvdata *data;

	if (!sysmmu)
		return -ENODEV;

	data = platform_get_drvdata(sysmmu);
	if (!data)
		return -ENODEV;

	if (!owner) {
		owner = kzalloc(sizeof(*owner), GFP_KERNEL);
		if (!owner)
			return -ENOMEM;

		INIT_LIST_HEAD(&owner->controllers);
		dev->archdata.iommu = owner;
	}

	list_add_tail(&data->owner_node, &owner->controllers);
	return 0;
}

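/*
 * Editor's note: exynos_iommu_of_xlate() is what ties a master device to
 * its SYSMMUs. For each "iommus" phandle the OF core hands over, the
 * matching sysmmu_drvdata is appended to the master's owner->controllers
 * list, which attach_device()/detach_device() above then iterate.
 */
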
static struct iommu_ops exynos_iommu_ops = {
	.domain_alloc = exynos_iommu_domain_alloc,
	.domain_free = exynos_iommu_domain_free,
	.attach_dev = exynos_iommu_attach_device,
	.detach_dev = exynos_iommu_detach_device,
	.map = exynos_iommu_map,
	.unmap = exynos_iommu_unmap,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = exynos_iommu_iova_to_phys,
	.device_group = get_device_iommu_group,
	.add_device = exynos_iommu_add_device,
	.remove_device = exynos_iommu_remove_device,
	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
	.of_xlate = exynos_iommu_of_xlate,
};

static bool init_done;

static int __init exynos_iommu_init(void)
{
	int ret;

	lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
				LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
	if (!lv2table_kmem_cache) {
		pr_err("%s: Failed to create kmem cache\n", __func__);
		return -ENOMEM;
	}

	ret = platform_driver_register(&exynos_sysmmu_driver);
	if (ret) {
		pr_err("%s: Failed to register driver\n", __func__);
		goto err_reg_driver;
	}

	zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
	if (zero_lv2_table == NULL) {
		pr_err("%s: Failed to allocate zero level2 page table\n",
			__func__);
		ret = -ENOMEM;
		goto err_zero_lv2;
	}

	ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
	if (ret) {
		pr_err("%s: Failed to register exynos-iommu driver.\n",
								__func__);
		goto err_set_iommu;
	}

	init_done = true;

	return 0;
err_set_iommu:
	kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
err_zero_lv2:
	platform_driver_unregister(&exynos_sysmmu_driver);
err_reg_driver:
	kmem_cache_destroy(lv2table_kmem_cache);
	return ret;
}

static int __init exynos_iommu_of_setup(struct device_node *np)
{
	struct platform_device *pdev;

	if (!init_done)
		exynos_iommu_init();

	pdev = of_platform_device_create(np, NULL, platform_bus_type.dev_root);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	/*
	 * use the first registered sysmmu device for performing
	 * dma mapping operations on iommu page tables (cpu cache flush)
	 */
	if (!dma_dev)
		dma_dev = &pdev->dev;

	of_iommu_set_ops(np, &exynos_iommu_ops);
	return 0;
}

IOMMU_OF_DECLARE(exynos_iommu_of, "samsung,exynos-sysmmu",
		 exynos_iommu_of_setup);