/* linux/drivers/iommu/exynos_iommu.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/interrupt.h>
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/dma-iommu.h>

typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;

/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

#define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
			   ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK)
#define lv1ent_page_zero(sent) ((*(sent) & 3) == 1)
#define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
			  ((*(sent) & 3) == 1))
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)

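/*
 * The entry type lives in the two low bits, much like the ARM
 * short-descriptor format: an lv1 entry with bits [1:0] == 1 links to
 * an lv2 table and == 2 maps a 1MiB section (0 and 3 are faults); an
 * lv2 entry with bit 1 set maps a 4KiB small page, while bits
 * [1:0] == 1 maps a 64KiB large page. For example,
 * mk_lv1ent_sect(0x40000000) below yields the section entry 0x40000002.
 */
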
static u32 sysmmu_page_offset(sysmmu_iova_t iova, u32 size)
{
	return iova & (size - 1);
}

#define section_phys(sent) (*(sent) & SECT_MASK)
#define section_offs(iova) sysmmu_page_offset((iova), SECT_SIZE)
#define lpage_phys(pent) (*(pent) & LPAGE_MASK)
#define lpage_offs(iova) sysmmu_page_offset((iova), LPAGE_SIZE)
#define spage_phys(pent) (*(pent) & SPAGE_MASK)
#define spage_offs(iova) sysmmu_page_offset((iova), SPAGE_SIZE)

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)

static u32 lv1ent_offset(sysmmu_iova_t iova)
{
	return iova >> SECT_ORDER;
}

static u32 lv2ent_offset(sysmmu_iova_t iova)
{
	return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
}

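/*
 * Worked example of the two-level split: for iova 0x12345678,
 * lv1ent_offset() returns 0x123 (bits [31:20]) and lv2ent_offset()
 * returns 0x45 (bits [19:12]), so the walk is pgtable[0x123] to the
 * linked lv2 table's entry 0x45, plus the 12-bit page offset 0x678.
 */
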
#define LV1TABLE_SIZE (NUM_LV1ENTRIES * sizeof(sysmmu_pte_t))
#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)

#define lv2table_base(sent) (*(sent) & 0xFFFFFC00)

#define mk_lv1ent_sect(pa) ((pa) | 2)
#define mk_lv1ent_page(pa) ((pa) | 1)
#define mk_lv2ent_lpage(pa) ((pa) | 1)
#define mk_lv2ent_spage(pa) ((pa) | 2)

#define CTRL_ENABLE 0x5
#define CTRL_BLOCK 0x7
#define CTRL_DISABLE 0x0

#define CFG_LRU 0x1
#define CFG_QOS(n) (((n) & 0xF) << 7)
#define CFG_MASK 0x0150FFFF /* Selecting bit 0-15, 20, 22 and 24 */
#define CFG_ACGEN (1 << 24) /* System MMU 3.3 only */
#define CFG_SYSSEL (1 << 22) /* System MMU 3.2 only */
#define CFG_FLPDCACHE (1 << 20) /* System MMU 3.2+ only */

#define REG_MMU_CTRL 0x000
#define REG_MMU_CFG 0x004
#define REG_MMU_STATUS 0x008
#define REG_MMU_FLUSH 0x00C
#define REG_MMU_FLUSH_ENTRY 0x010
#define REG_PT_BASE_ADDR 0x014
#define REG_INT_STATUS 0x018
#define REG_INT_CLEAR 0x01C

#define REG_PAGE_FAULT_ADDR 0x024
#define REG_AW_FAULT_ADDR 0x028
#define REG_AR_FAULT_ADDR 0x02C
#define REG_DEFAULT_SLAVE_ADDR 0x030

#define REG_MMU_VERSION 0x034

#define MMU_MAJ_VER(val) ((val) >> 7)
#define MMU_MIN_VER(val) ((val) & 0x7F)
#define MMU_RAW_VER(reg) (((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */

#define MAKE_MMU_VER(maj, min) ((((maj) & 0xF) << 7) | ((min) & 0x7F))

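/*
 * Version decode example: REG_MMU_VERSION keeps the version in bits
 * [31:21], so a raw register value of 0x30600000 gives
 * MMU_RAW_VER() == 0x183, the same value MAKE_MMU_VER(3, 3) builds
 * ((3 << 7) | 3), i.e. major 3, minor 3.
 */
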
#define REG_PB0_SADDR 0x04C
#define REG_PB0_EADDR 0x050
#define REG_PB1_SADDR 0x054
#define REG_PB1_EADDR 0x058

#define has_sysmmu(dev) ((dev)->archdata.iommu != NULL)

static struct device *dma_dev;
static struct kmem_cache *lv2table_kmem_cache;
static sysmmu_pte_t *zero_lv2_table;
#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))

static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
	return pgtable + lv1ent_offset(iova);
}

static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
	return (sysmmu_pte_t *)phys_to_virt(
				lv2table_base(sent)) + lv2ent_offset(iova);
}

/*
 * IOMMU fault information register
 */
struct sysmmu_fault_info {
	unsigned int bit;	/* bit number in STATUS register */
	unsigned short addr_reg; /* register to read VA fault address */
	const char *name;	/* human readable fault name */
	unsigned int type;	/* fault type for report_iommu_fault */
};

static const struct sysmmu_fault_info sysmmu_faults[] = {
	{ 0, REG_PAGE_FAULT_ADDR, "PAGE", IOMMU_FAULT_READ },
	{ 1, REG_AR_FAULT_ADDR, "AR MULTI-HIT", IOMMU_FAULT_READ },
	{ 2, REG_AW_FAULT_ADDR, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
	{ 3, REG_DEFAULT_SLAVE_ADDR, "BUS ERROR", IOMMU_FAULT_READ },
	{ 4, REG_AR_FAULT_ADDR, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
	{ 5, REG_AR_FAULT_ADDR, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
	{ 6, REG_AW_FAULT_ADDR, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
	{ 7, REG_AW_FAULT_ADDR, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
};

/*
 * This structure is attached to dev->archdata.iommu of the master device
 * on device add. It contains a list of SYSMMU controllers defined by the
 * device tree, which are bound to the given master device. It is usually
 * referenced by the 'owner' pointer.
 */
struct exynos_iommu_owner {
	struct list_head controllers;	/* list of sysmmu_drvdata.owner_node */
};

/*
 * This structure is an Exynos-specific generalization of struct iommu_domain.
 * It contains the list of SYSMMU controllers from all master devices that
 * have been attached to this domain, and the page tables of the I/O address
 * space defined by it. It is usually referenced by the 'domain' pointer.
 */
struct exynos_iommu_domain {
	struct list_head clients; /* list of sysmmu_drvdata.domain_node */
	sysmmu_pte_t *pgtable;	/* lv1 page table, 16KB */
	short *lv2entcnt;	/* free lv2 entry counter for each section */
	spinlock_t lock;	/* lock for modifying list of clients */
	spinlock_t pgtablelock;	/* lock for modifying page table @ pgtable */
	struct iommu_domain domain; /* generic domain data structure */
};

/*
 * This structure holds all data of a single SYSMMU controller. This includes
 * hw resources like registers and clocks, pointers and list nodes to connect
 * it to all other structures, internal state and parameters read from the
 * device tree. It is usually referenced by the 'data' pointer.
 */
struct sysmmu_drvdata {
	struct device *sysmmu;		/* SYSMMU controller device */
	struct device *master;		/* master device (owner) */
	void __iomem *sfrbase;		/* our registers */
	struct clk *clk;		/* SYSMMU's clock */
	struct clk *clk_master;		/* master's device clock */
	int activations;		/* number of calls to sysmmu_enable */
	spinlock_t lock;		/* lock for modifying state */
	struct exynos_iommu_domain *domain; /* domain we belong to */
	struct list_head domain_node;	/* node for domain clients list */
	struct list_head owner_node;	/* node for owner controllers list */
	phys_addr_t pgtable;		/* assigned page table structure */
	unsigned int version;		/* our version */
};

static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct exynos_iommu_domain, domain);
}

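/*
 * 'activations' reference-counts nested enable/disable requests under
 * data->lock: only the 0 -> 1 transition actually programs the
 * hardware, and only the 1 -> 0 transition shuts it down.
 */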
static bool set_sysmmu_active(struct sysmmu_drvdata *data)
{
	/* return true if the System MMU was not active previously
	   and it needs to be initialized */
	return ++data->activations == 1;
}

static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
{
	/* return true if the System MMU needs to be disabled */
	BUG_ON(data->activations < 1);
	return --data->activations == 0;
}

static bool is_sysmmu_active(struct sysmmu_drvdata *data)
{
	return data->activations > 0;
}

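/*
 * Blocking stalls address translation so that TLB maintenance can be
 * issued safely; bit 0 of REG_MMU_STATUS reads back 1 once the blocked
 * state is reached. sysmmu_block() bounds the busy-wait to 120 status
 * reads and, on timeout, re-enables the MMU and reports failure so the
 * caller skips the maintenance operation.
 */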
static void sysmmu_unblock(struct sysmmu_drvdata *data)
{
	__raw_writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
}

static bool sysmmu_block(struct sysmmu_drvdata *data)
{
	int i = 120;

	__raw_writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
	while ((i > 0) && !(__raw_readl(data->sfrbase + REG_MMU_STATUS) & 1))
		--i;

	if (!(__raw_readl(data->sfrbase + REG_MMU_STATUS) & 1)) {
		sysmmu_unblock(data);
		return false;
	}

	return true;
}

static void __sysmmu_tlb_invalidate(struct sysmmu_drvdata *data)
{
	__raw_writel(0x1, data->sfrbase + REG_MMU_FLUSH);
}

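/*
 * Each write to REG_MMU_FLUSH_ENTRY invalidates the TLB entry for a
 * single 4KiB virtual page; bit 0 marks the written VPN as a valid
 * flush command. Larger mappings are flushed with one write per page,
 * as in the loop below.
 */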
static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
				sysmmu_iova_t iova, unsigned int num_inv)
{
	unsigned int i;

	for (i = 0; i < num_inv; i++) {
		__raw_writel((iova & SPAGE_MASK) | 1,
				data->sfrbase + REG_MMU_FLUSH_ENTRY);
		iova += SPAGE_SIZE;
	}
}

static void __sysmmu_set_ptbase(struct sysmmu_drvdata *data, phys_addr_t pgd)
{
	__raw_writel(pgd, data->sfrbase + REG_PT_BASE_ADDR);

	__sysmmu_tlb_invalidate(data);
}

static void show_fault_information(struct sysmmu_drvdata *data,
				   const struct sysmmu_fault_info *finfo,
				   sysmmu_iova_t fault_addr)
{
	sysmmu_pte_t *ent;

	dev_err(data->sysmmu, "%s FAULT occurred at %#x (page table base: %pa)\n",
		finfo->name, fault_addr, &data->pgtable);
	ent = section_entry(phys_to_virt(data->pgtable), fault_addr);
	dev_err(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
	if (lv1ent_page(ent)) {
		ent = page_entry(ent, fault_addr);
		dev_err(data->sysmmu, "\t Lv2 entry: %#x\n", *ent);
	}
}

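/*
 * Fault interrupt handler. The hardware blocks translation when it
 * raises a fault, so this runs with the SYSMMU already in the blocked
 * state: it decodes REG_INT_STATUS through the sysmmu_faults table,
 * reports the fault upwards, and unblocks only after clearing the
 * interrupt. A fault that no handler recovers is fatal (BUG_ON), since
 * the master would otherwise keep faulting.
 */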
static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
	/* SYSMMU is in blocked state when the interrupt occurs. */
	struct sysmmu_drvdata *data = dev_id;
	const struct sysmmu_fault_info *finfo = sysmmu_faults;
	int i, n = ARRAY_SIZE(sysmmu_faults);
	unsigned int itype;
	sysmmu_iova_t fault_addr = -1;
	int ret = -ENOSYS;

	WARN_ON(!is_sysmmu_active(data));

	spin_lock(&data->lock);

	clk_enable(data->clk_master);

	itype = __ffs(__raw_readl(data->sfrbase + REG_INT_STATUS));
	for (i = 0; i < n; i++, finfo++)
		if (finfo->bit == itype)
			break;
	/* unknown/unsupported fault */
	BUG_ON(i == n);

	/* print debug message */
	fault_addr = __raw_readl(data->sfrbase + finfo->addr_reg);
	show_fault_information(data, finfo, fault_addr);

	if (data->domain)
		ret = report_iommu_fault(&data->domain->domain,
					data->master, fault_addr, finfo->type);
	/* fault is not recovered by fault handler */
	BUG_ON(ret != 0);

	__raw_writel(1 << itype, data->sfrbase + REG_INT_CLEAR);

	sysmmu_unblock(data);

	clk_disable(data->clk_master);

	spin_unlock(&data->lock);

	return IRQ_HANDLED;
}

static void __sysmmu_disable_nocount(struct sysmmu_drvdata *data)
{
	clk_enable(data->clk_master);

	__raw_writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
	__raw_writel(0, data->sfrbase + REG_MMU_CFG);

	clk_disable(data->clk);
	clk_disable(data->clk_master);
}

static bool __sysmmu_disable(struct sysmmu_drvdata *data)
{
	bool disabled;
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);

	disabled = set_sysmmu_inactive(data);

	if (disabled) {
		data->pgtable = 0;
		data->domain = NULL;

		__sysmmu_disable_nocount(data);

		dev_dbg(data->sysmmu, "Disabled\n");
	} else {
		dev_dbg(data->sysmmu, "%d times left to disable\n",
					data->activations);
	}

	spin_unlock_irqrestore(&data->lock, flags);

	return disabled;
}

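/*
 * REG_MMU_CFG by probed version: anything below v3.2 gets LRU
 * replacement and QoS 15 only, v3.2 additionally enables the
 * first-level page table (FLPD) cache and SYSSEL, and v3.3 keeps the
 * FLPD cache but trades LRU for automatic clock gating (ACGEN).
 */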
static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
	unsigned int cfg = CFG_LRU | CFG_QOS(15);
	unsigned int ver;

	ver = MMU_RAW_VER(__raw_readl(data->sfrbase + REG_MMU_VERSION));
	if (MMU_MAJ_VER(ver) == 3) {
		if (MMU_MIN_VER(ver) >= 2) {
			cfg |= CFG_FLPDCACHE;
			if (MMU_MIN_VER(ver) == 3) {
				cfg |= CFG_ACGEN;
				cfg &= ~CFG_LRU;
			} else {
				cfg |= CFG_SYSSEL;
			}
		}
	}

	__raw_writel(cfg, data->sfrbase + REG_MMU_CFG);
	data->version = ver;
}

static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data)
{
	clk_enable(data->clk_master);
	clk_enable(data->clk);

	__raw_writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);

	__sysmmu_init_config(data);

	__sysmmu_set_ptbase(data, data->pgtable);

	__raw_writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);

	clk_disable(data->clk_master);
}

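/*
 * Return convention: 0 on the first successful enable, 1 if the SYSMMU
 * was already enabled with the same page table (a harmless re-attach),
 * and -EBUSY if it is already serving a different page table.
 */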
static int __sysmmu_enable(struct sysmmu_drvdata *data, phys_addr_t pgtable,
			   struct exynos_iommu_domain *domain)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (set_sysmmu_active(data)) {
		data->pgtable = pgtable;
		data->domain = domain;

		__sysmmu_enable_nocount(data);

		dev_dbg(data->sysmmu, "Enabled\n");
	} else {
		ret = (pgtable == data->pgtable) ? 1 : -EBUSY;

		dev_dbg(data->sysmmu, "already enabled\n");
	}

	if (WARN_ON(ret < 0))
		set_sysmmu_inactive(data); /* decrement count */

	spin_unlock_irqrestore(&data->lock, flags);

	return ret;
}

static void __sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
					      sysmmu_iova_t iova)
{
	if (data->version == MAKE_MMU_VER(3, 3))
		__raw_writel(iova | 0x1, data->sfrbase + REG_MMU_FLUSH_ENTRY);
}

static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
					    sysmmu_iova_t iova)
{
	unsigned long flags;

	clk_enable(data->clk_master);

	spin_lock_irqsave(&data->lock, flags);
	if (is_sysmmu_active(data))
		__sysmmu_tlb_invalidate_flpdcache(data, iova);
	spin_unlock_irqrestore(&data->lock, flags);

	clk_disable(data->clk_master);
}

static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
					sysmmu_iova_t iova, size_t size)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (is_sysmmu_active(data)) {
		unsigned int num_inv = 1;

		clk_enable(data->clk_master);

		/*
		 * L2TLB invalidation required
		 * 4KB page: 1 invalidation
		 * 64KB page: 16 invalidations
		 * 1MB page: 64 invalidations
		 * because it is set-associative TLB
		 * with 8-way and 64 sets.
		 * 1MB page can be cached in one of all sets.
		 * 64KB page can be one of 16 consecutive sets.
		 */
		if (MMU_MAJ_VER(data->version) == 2)
			num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);

		if (sysmmu_block(data)) {
			__sysmmu_tlb_invalidate_entry(data, iova, num_inv);
			sysmmu_unblock(data);
		}
		clk_disable(data->clk_master);
	} else {
		dev_dbg(data->master,
			"disabled. Skipping TLB invalidation @ %#x\n", iova);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

static int __init exynos_sysmmu_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct device *dev = &pdev->dev;
	struct sysmmu_drvdata *data;
	struct resource *res;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->sfrbase = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->sfrbase))
		return PTR_ERR(data->sfrbase);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(dev, "Unable to find IRQ resource\n");
		return irq;
	}

	ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
				dev_name(dev), data);
	if (ret) {
		dev_err(dev, "Unable to register handler of irq %d\n", irq);
		return ret;
	}

	data->clk = devm_clk_get(dev, "sysmmu");
	if (IS_ERR(data->clk)) {
		dev_err(dev, "Failed to get clock!\n");
		return PTR_ERR(data->clk);
	} else {
		ret = clk_prepare(data->clk);
		if (ret) {
			dev_err(dev, "Failed to prepare clk\n");
			return ret;
		}
	}

	data->clk_master = devm_clk_get(dev, "master");
	if (!IS_ERR(data->clk_master)) {
		ret = clk_prepare(data->clk_master);
		if (ret) {
			clk_unprepare(data->clk);
			dev_err(dev, "Failed to prepare master's clk\n");
			return ret;
		}
	} else {
		data->clk_master = NULL;
	}

	data->sysmmu = dev;
	spin_lock_init(&data->lock);

	platform_set_drvdata(pdev, data);

	pm_runtime_enable(dev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int exynos_sysmmu_suspend(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);

	dev_dbg(dev, "suspend\n");
	if (is_sysmmu_active(data)) {
		__sysmmu_disable_nocount(data);
		pm_runtime_put(dev);
	}
	return 0;
}

static int exynos_sysmmu_resume(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);

	dev_dbg(dev, "resume\n");
	if (is_sysmmu_active(data)) {
		pm_runtime_get_sync(dev);
		__sysmmu_enable_nocount(data);
	}
	return 0;
}
#endif

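/*
 * System sleep uses the "late" phase, presumably so that the SYSMMU is
 * shut down only after its master devices have finished their normal
 * suspend callbacks and is reprogrammed before they resume.
 */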
static const struct dev_pm_ops sysmmu_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume)
};

static const struct of_device_id sysmmu_of_match[] __initconst = {
	{ .compatible	= "samsung,exynos-sysmmu", },
	{ },
};

static struct platform_driver exynos_sysmmu_driver __refdata = {
	.probe	= exynos_sysmmu_probe,
	.driver	= {
		.name		= "exynos-sysmmu",
		.of_match_table	= sysmmu_of_match,
		.pm		= &sysmmu_pm_ops,
	}
};

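/*
 * Page tables live in cacheable memory, but the SYSMMU walks them
 * directly in RAM; the dma_sync_single_*() pair around every PTE
 * update pushes the modified cache line out through dma_dev so the
 * hardware walker observes the new entry.
 */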
static inline void update_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
{
	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent),
				DMA_TO_DEVICE);
	*ent = val;
	dma_sync_single_for_device(dma_dev, virt_to_phys(ent), sizeof(*ent),
				   DMA_TO_DEVICE);
}

static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
{
	struct exynos_iommu_domain *domain;
	dma_addr_t handle;
	int i;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA) {
		if (iommu_get_dma_cookie(&domain->domain) != 0)
			goto err_pgtable;
	} else if (type != IOMMU_DOMAIN_UNMANAGED) {
		goto err_pgtable;
	}

	domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!domain->pgtable)
		goto err_dma_cookie;

	domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!domain->lv2entcnt)
		goto err_counter;

	/* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
	for (i = 0; i < NUM_LV1ENTRIES; i += 8) {
		domain->pgtable[i + 0] = ZERO_LV2LINK;
		domain->pgtable[i + 1] = ZERO_LV2LINK;
		domain->pgtable[i + 2] = ZERO_LV2LINK;
		domain->pgtable[i + 3] = ZERO_LV2LINK;
		domain->pgtable[i + 4] = ZERO_LV2LINK;
		domain->pgtable[i + 5] = ZERO_LV2LINK;
		domain->pgtable[i + 6] = ZERO_LV2LINK;
		domain->pgtable[i + 7] = ZERO_LV2LINK;
	}

	handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE,
				DMA_TO_DEVICE);
	/* For mapping page table entries we rely on dma == phys */
	BUG_ON(handle != virt_to_phys(domain->pgtable));

	spin_lock_init(&domain->lock);
	spin_lock_init(&domain->pgtablelock);
	INIT_LIST_HEAD(&domain->clients);

	domain->domain.geometry.aperture_start = 0;
	domain->domain.geometry.aperture_end   = ~0UL;
	domain->domain.geometry.force_aperture = true;

	return &domain->domain;

err_counter:
	free_pages((unsigned long)domain->pgtable, 2);
err_dma_cookie:
	if (type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(&domain->domain);
err_pgtable:
	kfree(domain);
	return NULL;
}

static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;
	int i;

	WARN_ON(!list_empty(&domain->clients));

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		if (__sysmmu_disable(data))
			data->master = NULL;
		list_del_init(&data->domain_node);
	}

	spin_unlock_irqrestore(&domain->lock, flags);

	if (iommu_domain->type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(iommu_domain);

	dma_unmap_single(dma_dev, virt_to_phys(domain->pgtable), LV1TABLE_SIZE,
			 DMA_TO_DEVICE);

	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (lv1ent_page(domain->pgtable + i)) {
			phys_addr_t base = lv2table_base(domain->pgtable + i);

			dma_unmap_single(dma_dev, base, LV2TABLE_SIZE,
					 DMA_TO_DEVICE);
			kmem_cache_free(lv2table_kmem_cache,
					phys_to_virt(base));
		}

	free_pages((unsigned long)domain->pgtable, 2);
	free_pages((unsigned long)domain->lv2entcnt, 1);
	kfree(domain);
}

static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
				      struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct sysmmu_drvdata *data;
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	unsigned long flags;
	int ret = -ENODEV;

	if (!has_sysmmu(dev))
		return -ENODEV;

	list_for_each_entry(data, &owner->controllers, owner_node) {
		pm_runtime_get_sync(data->sysmmu);
		ret = __sysmmu_enable(data, pagetable, domain);
		if (ret >= 0) {
			data->master = dev;

			spin_lock_irqsave(&domain->lock, flags);
			list_add_tail(&data->domain_node, &domain->clients);
			spin_unlock_irqrestore(&domain->lock, flags);
		}
	}

	if (ret < 0) {
		dev_err(dev, "%s: Failed to attach IOMMU with pgtable %pa\n",
			__func__, &pagetable);
		return ret;
	}

	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa %s\n",
		__func__, &pagetable, (ret == 0) ? "" : ", again");

	return ret;
}

static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
				       struct device *dev)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;
	bool found = false;

	if (!has_sysmmu(dev))
		return;

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		if (data->master == dev) {
			if (__sysmmu_disable(data)) {
				data->master = NULL;
				list_del_init(&data->domain_node);
			}
			pm_runtime_put(data->sysmmu);
			found = true;
		}
	}
	spin_unlock_irqrestore(&domain->lock, flags);

	if (found)
		dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n",
			__func__, &pagetable);
	else
		dev_err(dev, "%s: No IOMMU is attached\n", __func__);
}

static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
		sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova);
		return ERR_PTR(-EADDRINUSE);
	}

	if (lv1ent_fault(sent)) {
		sysmmu_pte_t *pent;
		bool need_flush_flpd_cache = lv1ent_zero(sent);

		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
		BUG_ON((unsigned int)pent & (LV2TABLE_SIZE - 1));
		if (!pent)
			return ERR_PTR(-ENOMEM);

		update_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
		kmemleak_ignore(pent);
		*pgcounter = NUM_LV2ENTRIES;
		dma_map_single(dma_dev, pent, LV2TABLE_SIZE, DMA_TO_DEVICE);

		/*
		 * If pre-fetched SLPD is a faulty SLPD in zero_l2_table,
		 * FLPD cache may cache the address of zero_l2_table. This
		 * function replaces the zero_l2_table with new L2 page table
		 * to write valid mappings.
		 * Accessing the valid area may cause page fault since FLPD
		 * cache may still cache zero_l2_table for the valid area
		 * instead of new L2 page table that has the mapping
		 * information of the valid area.
		 * Thus any replacement of zero_l2_table with other valid L2
		 * page table must involve FLPD cache invalidation for System
		 * MMU v3.3.
		 * FLPD cache invalidation is performed with TLB invalidation
		 * by VPN without blocking. It is safe to invalidate TLB without
		 * blocking because the target address of TLB invalidation is
		 * not currently mapped.
		 */
		if (need_flush_flpd_cache) {
			struct sysmmu_drvdata *data;

			spin_lock(&domain->lock);
			list_for_each_entry(data, &domain->clients, domain_node)
				sysmmu_tlb_invalidate_flpdcache(data, iova);
			spin_unlock(&domain->lock);
		}
	}

	return page_entry(sent, iova);
}

static int lv1set_section(struct exynos_iommu_domain *domain,
			  sysmmu_pte_t *sent, sysmmu_iova_t iova,
			  phys_addr_t paddr, short *pgcnt)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
			iova);
		return -EADDRINUSE;
	}

	if (lv1ent_page(sent)) {
		if (*pgcnt != NUM_LV2ENTRIES) {
			WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
				iova);
			return -EADDRINUSE;
		}

		kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
		*pgcnt = 0;
	}

	update_pte(sent, mk_lv1ent_sect(paddr));

	spin_lock(&domain->lock);
	if (lv1ent_page_zero(sent)) {
		struct sysmmu_drvdata *data;
		/*
		 * Flushing FLPD cache in System MMU v3.3 that may cache a FLPD
		 * entry by speculative prefetch of SLPD which has no mapping.
		 */
		list_for_each_entry(data, &domain->clients, domain_node)
			sysmmu_tlb_invalidate_flpdcache(data, iova);
	}
	spin_unlock(&domain->lock);

	return 0;
}

static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
		       short *pgcnt)
{
	if (size == SPAGE_SIZE) {
		if (WARN_ON(!lv2ent_fault(pent)))
			return -EADDRINUSE;

		update_pte(pent, mk_lv2ent_spage(paddr));
		*pgcnt -= 1;
	} else { /* size == LPAGE_SIZE */
		int i;
		dma_addr_t pent_base = virt_to_phys(pent);

		dma_sync_single_for_cpu(dma_dev, pent_base,
					sizeof(*pent) * SPAGES_PER_LPAGE,
					DMA_TO_DEVICE);
		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
			if (WARN_ON(!lv2ent_fault(pent))) {
				if (i > 0)
					memset(pent - i, 0, sizeof(*pent) * i);
				return -EADDRINUSE;
			}

			*pent = mk_lv2ent_lpage(paddr);
		}
		dma_sync_single_for_device(dma_dev, pent_base,
					   sizeof(*pent) * SPAGES_PER_LPAGE,
					   DMA_TO_DEVICE);
		*pgcnt -= SPAGES_PER_LPAGE;
	}

	return 0;
}

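/*
 * The per-section lv2entcnt counter tracks free slots in each lv2
 * table: alloc_lv2entry() starts it at NUM_LV2ENTRIES (256), mapping
 * drops it by 1 per small page or by SPAGES_PER_LPAGE (16) per large
 * page, and unmapping adds those amounts back. A full count again
 * means the table is unused, which is what lv1set_section() requires
 * before replacing it with a 1MiB section mapping.
 */
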
/*
 * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
 *
 * System MMU v3.x has advanced logic to improve address translation
 * performance with caching more page table entries by a page table walk.
 * However, the logic has a bug that while caching faulty page table entries,
 * System MMU reports page fault if the cached fault entry is hit even though
 * the fault entry is updated to a valid entry after the entry is cached.
 * To prevent caching faulty page table entries which may be updated to valid
 * entries later, the virtual memory manager should care about the workaround
 * for the problem. The following describes the workaround.
 *
 * Any two consecutive I/O virtual address regions must have a hole of 128KiB
 * at maximum to prevent misbehavior of System MMU 3.x (workaround for h/w bug).
 *
 * Precisely, any start address of I/O virtual region must be aligned with
 * the following sizes for System MMU v3.1 and v3.2.
 * System MMU v3.1: 128KiB
 * System MMU v3.2: 256KiB
 *
 * Because System MMU v3.3 caches page table entries more aggressively, it needs
 * more workarounds.
 * - Any two consecutive I/O virtual regions must have a hole of size larger
 *   than or equal to 128KiB.
 * - Start address of an I/O virtual region must be aligned by 128KiB.
 */
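/*
 * Illustrative IOVA layout for the v3.3 rules above (an example, not
 * part of the driver): with 128KiB == 0x20000, a 128KiB region placed
 * at 0x100000 ends at 0x120000, so the next region may start no lower
 * than 0x140000, which keeps both the 128KiB start alignment and the
 * minimum 128KiB hole.
 */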
static int exynos_iommu_map(struct iommu_domain *iommu_domain,
			    unsigned long l_iova, phys_addr_t paddr, size_t size,
			    int prot)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	unsigned long flags;
	int ret = -ENOMEM;

	BUG_ON(domain->pgtable == NULL);

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (size == SECT_SIZE) {
		ret = lv1set_section(domain, entry, iova, paddr,
				     &domain->lv2entcnt[lv1ent_offset(iova)]);
	} else {
		sysmmu_pte_t *pent;

		pent = alloc_lv2entry(domain, entry, iova,
				      &domain->lv2entcnt[lv1ent_offset(iova)]);

		if (IS_ERR(pent))
			ret = PTR_ERR(pent);
		else
			ret = lv2set_page(pent, paddr, size,
					  &domain->lv2entcnt[lv1ent_offset(iova)]);
	}

	if (ret)
		pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
			__func__, ret, size, iova);

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return ret;
}

static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain,
					      sysmmu_iova_t iova, size_t size)
{
	struct sysmmu_drvdata *data;
	unsigned long flags;

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry(data, &domain->clients, domain_node)
		sysmmu_tlb_invalidate_entry(data, iova, size);

	spin_unlock_irqrestore(&domain->lock, flags);
}

static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
				 unsigned long l_iova, size_t size)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	sysmmu_pte_t *ent;
	size_t err_pgsize;
	unsigned long flags;

	BUG_ON(domain->pgtable == NULL);

	spin_lock_irqsave(&domain->pgtablelock, flags);

	ent = section_entry(domain->pgtable, iova);

	if (lv1ent_section(ent)) {
		if (WARN_ON(size < SECT_SIZE)) {
			err_pgsize = SECT_SIZE;
			goto err;
		}

		/* workaround for h/w bug in System MMU v3.3 */
		update_pte(ent, ZERO_LV2LINK);
		size = SECT_SIZE;
		goto done;
	}

	if (unlikely(lv1ent_fault(ent))) {
		if (size > SECT_SIZE)
			size = SECT_SIZE;
		goto done;
	}

	/* lv1ent_page(sent) == true here */

	ent = page_entry(ent, iova);

	if (unlikely(lv2ent_fault(ent))) {
		size = SPAGE_SIZE;
		goto done;
	}

	if (lv2ent_small(ent)) {
		update_pte(ent, 0);
		size = SPAGE_SIZE;
		domain->lv2entcnt[lv1ent_offset(iova)] += 1;
		goto done;
	}

	/* lv2ent_large(ent) == true here */
	if (WARN_ON(size < LPAGE_SIZE)) {
		err_pgsize = LPAGE_SIZE;
		goto err;
	}

	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent),
				sizeof(*ent) * SPAGES_PER_LPAGE,
				DMA_TO_DEVICE);
	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
	dma_sync_single_for_device(dma_dev, virt_to_phys(ent),
				   sizeof(*ent) * SPAGES_PER_LPAGE,
				   DMA_TO_DEVICE);
	size = LPAGE_SIZE;
	domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	exynos_iommu_tlb_invalidate_entry(domain, iova, size);

	return size;
err:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
		__func__, size, iova, err_pgsize);

	return 0;
}

static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
					     dma_addr_t iova)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	unsigned long flags;
	phys_addr_t phys = 0;

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (lv1ent_section(entry)) {
		phys = section_phys(entry) + section_offs(iova);
	} else if (lv1ent_page(entry)) {
		entry = page_entry(entry, iova);

		if (lv2ent_large(entry))
			phys = lpage_phys(entry) + lpage_offs(iova);
		else if (lv2ent_small(entry))
			phys = spage_phys(entry) + spage_offs(iova);
	}

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return phys;
}

static struct iommu_group *get_device_iommu_group(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		group = iommu_group_alloc();

	return group;
}

static int exynos_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;

	if (!has_sysmmu(dev))
		return -ENODEV;

	group = iommu_group_get_for_dev(dev);

	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);

	return 0;
}

static void exynos_iommu_remove_device(struct device *dev)
{
	if (!has_sysmmu(dev))
		return;

	iommu_group_remove_device(dev);
}

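/*
 * Called once per "iommus" specifier in a master's device tree node:
 * each referenced SYSMMU instance is linked into the master's
 * exynos_iommu_owner list, which exynos_iommu_attach_device() later
 * walks to enable every controller sitting in front of that master.
 */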
static int exynos_iommu_of_xlate(struct device *dev,
				 struct of_phandle_args *spec)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct platform_device *sysmmu = of_find_device_by_node(spec->np);
	struct sysmmu_drvdata *data;

	if (!sysmmu)
		return -ENODEV;

	data = platform_get_drvdata(sysmmu);
	if (!data)
		return -ENODEV;

	if (!owner) {
		owner = kzalloc(sizeof(*owner), GFP_KERNEL);
		if (!owner)
			return -ENOMEM;

		INIT_LIST_HEAD(&owner->controllers);
		dev->archdata.iommu = owner;
	}

	list_add_tail(&data->owner_node, &owner->controllers);
	return 0;
}

static struct iommu_ops exynos_iommu_ops = {
	.domain_alloc = exynos_iommu_domain_alloc,
	.domain_free = exynos_iommu_domain_free,
	.attach_dev = exynos_iommu_attach_device,
	.detach_dev = exynos_iommu_detach_device,
	.map = exynos_iommu_map,
	.unmap = exynos_iommu_unmap,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = exynos_iommu_iova_to_phys,
	.device_group = get_device_iommu_group,
	.add_device = exynos_iommu_add_device,
	.remove_device = exynos_iommu_remove_device,
	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
	.of_xlate = exynos_iommu_of_xlate,
};

static bool init_done;

static int __init exynos_iommu_init(void)
{
	int ret;

	lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
				LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
	if (!lv2table_kmem_cache) {
		pr_err("%s: Failed to create kmem cache\n", __func__);
		return -ENOMEM;
	}

	ret = platform_driver_register(&exynos_sysmmu_driver);
	if (ret) {
		pr_err("%s: Failed to register driver\n", __func__);
		goto err_reg_driver;
	}

	zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
	if (zero_lv2_table == NULL) {
		pr_err("%s: Failed to allocate zero level2 page table\n",
			__func__);
		ret = -ENOMEM;
		goto err_zero_lv2;
	}

	ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
	if (ret) {
		pr_err("%s: Failed to register exynos-iommu driver.\n",
			__func__);
		goto err_set_iommu;
	}

	init_done = true;

	return 0;
err_set_iommu:
	kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
err_zero_lv2:
	platform_driver_unregister(&exynos_sysmmu_driver);
err_reg_driver:
	kmem_cache_destroy(lv2table_kmem_cache);
	return ret;
}

static int __init exynos_iommu_of_setup(struct device_node *np)
{
	struct platform_device *pdev;

	if (!init_done)
		exynos_iommu_init();

	pdev = of_platform_device_create(np, NULL, platform_bus_type.dev_root);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	/*
	 * use the first registered sysmmu device for performing
	 * dma mapping operations on iommu page tables (cpu cache flush)
	 */
	if (!dma_dev)
		dma_dev = &pdev->dev;

	of_iommu_set_ops(np, &exynos_iommu_ops);
	return 0;
}

IOMMU_OF_DECLARE(exynos_iommu_of, "samsung,exynos-sysmmu",
		 exynos_iommu_of_setup);