// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2011,2016 Samsung Electronics Co., Ltd.
 * http://www.samsung.com
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/interrupt.h>
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/dma-iommu.h>

typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;

/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

#define lv1ent_fault(sent)	((*(sent) == ZERO_LV2LINK) || \
				 ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_zero(sent)	(*(sent) == ZERO_LV2LINK)
#define lv1ent_page_zero(sent)	((*(sent) & 3) == 1)
#define lv1ent_page(sent)	((*(sent) != ZERO_LV2LINK) && \
				 ((*(sent) & 3) == 1))
#define lv1ent_section(sent)	((*(sent) & 3) == 2)

#define lv2ent_fault(pent)	((*(pent) & 3) == 0)
#define lv2ent_small(pent)	((*(pent) & 2) == 2)
#define lv2ent_large(pent)	((*(pent) & 3) == 1)
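
/*
 * Illustrative summary (added for clarity, derived from the macros above):
 * the entry type lives in the two least significant bits of each entry.
 *   lv1: ..01 = link to a lv2 page table, ..10 = 1MiB section,
 *        ..00 and ..11 = fault
 *   lv2: ..01 = 64KiB large page, bit 1 set (..10/..11) = 4KiB small page,
 *        ..00 = fault (unmapped)
 */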
54
Marek Szyprowski740a01e2016-02-18 15:12:58 +010055/*
56 * v1.x - v3.x SYSMMU supports 32bit physical and 32bit virtual address spaces
57 * v5.0 introduced support for 36bit physical address space by shifting
58 * all page entry values by 4 bits.
59 * All SYSMMU controllers in the system support the address spaces of the same
60 * size, so PG_ENT_SHIFT can be initialized on first SYSMMU probe to proper
61 * value (0 or 4).
62 */
63static short PG_ENT_SHIFT = -1;
64#define SYSMMU_PG_ENT_SHIFT 0
65#define SYSMMU_V5_PG_ENT_SHIFT 4
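
/*
 * Worked example (illustrative, not from the original source): on v5
 * hardware PG_ENT_SHIFT == 4, so the 36-bit, 1MiB-aligned section address
 * 0x812300000 fits into a 32-bit entry as 0x812300000 >> 4 == 0x81230000;
 * sect_to_phys() below undoes this by shifting left by the same amount.
 */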

static const sysmmu_pte_t *LV1_PROT;
static const sysmmu_pte_t SYSMMU_LV1_PROT[] = {
	((0 << 15) | (0 << 10)), /* no access */
	((1 << 15) | (1 << 10)), /* IOMMU_READ only */
	((0 << 15) | (1 << 10)), /* IOMMU_WRITE not supported, use read/write */
	((0 << 15) | (1 << 10)), /* IOMMU_READ | IOMMU_WRITE */
};
static const sysmmu_pte_t SYSMMU_V5_LV1_PROT[] = {
	(0 << 4), /* no access */
	(1 << 4), /* IOMMU_READ only */
	(2 << 4), /* IOMMU_WRITE only */
	(3 << 4), /* IOMMU_READ | IOMMU_WRITE */
};

static const sysmmu_pte_t *LV2_PROT;
static const sysmmu_pte_t SYSMMU_LV2_PROT[] = {
	((0 << 9) | (0 << 4)), /* no access */
	((1 << 9) | (1 << 4)), /* IOMMU_READ only */
	((0 << 9) | (1 << 4)), /* IOMMU_WRITE not supported, use read/write */
	((0 << 9) | (1 << 4)), /* IOMMU_READ | IOMMU_WRITE */
};
static const sysmmu_pte_t SYSMMU_V5_LV2_PROT[] = {
	(0 << 2), /* no access */
	(1 << 2), /* IOMMU_READ only */
	(2 << 2), /* IOMMU_WRITE only */
	(3 << 2), /* IOMMU_READ | IOMMU_WRITE */
};

#define SYSMMU_SUPPORTED_PROT_BITS (IOMMU_READ | IOMMU_WRITE)
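
/*
 * Illustrative note (added for clarity): the LV1/LV2_PROT tables above are
 * indexed directly by the masked IOMMU prot flags, e.g.
 * prot == IOMMU_READ | IOMMU_WRITE == 3 selects the read/write entry;
 * exynos_iommu_map() masks prot with SYSMMU_SUPPORTED_PROT_BITS before
 * using it as the index.
 */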

#define sect_to_phys(ent) (((phys_addr_t) ent) << PG_ENT_SHIFT)
#define section_phys(sent) (sect_to_phys(*(sent)) & SECT_MASK)
#define section_offs(iova) (iova & (SECT_SIZE - 1))
#define lpage_phys(pent) (sect_to_phys(*(pent)) & LPAGE_MASK)
#define lpage_offs(iova) (iova & (LPAGE_SIZE - 1))
#define spage_phys(pent) (sect_to_phys(*(pent)) & SPAGE_MASK)
#define spage_offs(iova) (iova & (SPAGE_SIZE - 1))

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)

static u32 lv1ent_offset(sysmmu_iova_t iova)
{
	return iova >> SECT_ORDER;
}

static u32 lv2ent_offset(sysmmu_iova_t iova)
{
	return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
}
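
/*
 * Worked example (illustrative only): for iova == 0x12345678,
 *   lv1ent_offset(iova) == 0x12345678 >> 20          == 0x123
 *   lv2ent_offset(iova) == (0x12345678 >> 12) & 0xff == 0x45
 * so the walk reads pgtable[0x123], follows the link to a lv2 table and
 * reads entry 0x45 there; spage_offs() supplies the final 4KiB offset.
 */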

#define LV1TABLE_SIZE (NUM_LV1ENTRIES * sizeof(sysmmu_pte_t))
#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
#define lv2table_base(sent) (sect_to_phys(*(sent) & 0xFFFFFFC0))

#define mk_lv1ent_sect(pa, prot) ((pa >> PG_ENT_SHIFT) | LV1_PROT[prot] | 2)
#define mk_lv1ent_page(pa) ((pa >> PG_ENT_SHIFT) | 1)
#define mk_lv2ent_lpage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 1)
#define mk_lv2ent_spage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 2)

#define CTRL_ENABLE 0x5
#define CTRL_BLOCK 0x7
#define CTRL_DISABLE 0x0

#define CFG_LRU 0x1
#define CFG_EAP (1 << 2)
#define CFG_QOS(n) ((n & 0xF) << 7)
#define CFG_ACGEN (1 << 24) /* System MMU 3.3 only */
#define CFG_SYSSEL (1 << 22) /* System MMU 3.2 only */
#define CFG_FLPDCACHE (1 << 20) /* System MMU 3.2+ only */

/* common registers */
#define REG_MMU_CTRL 0x000
#define REG_MMU_CFG 0x004
#define REG_MMU_STATUS 0x008
#define REG_MMU_VERSION 0x034

#define MMU_MAJ_VER(val) ((val) >> 7)
#define MMU_MIN_VER(val) ((val) & 0x7F)
#define MMU_RAW_VER(reg) (((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */

#define MAKE_MMU_VER(maj, min) ((((maj) & 0xF) << 7) | ((min) & 0x7F))
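
/*
 * Illustrative example (added for clarity): REG_MMU_VERSION keeps the
 * version in its top 11 bits, so MMU_RAW_VER() shifts right by 21 and
 * masks; the result packs major.minor as (maj << 7) | min, e.g.
 * MAKE_MMU_VER(3, 3) == 0x183, MMU_MAJ_VER(0x183) == 3 and
 * MMU_MIN_VER(0x183) == 3.
 */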

/* v1.x - v3.x registers */
#define REG_MMU_FLUSH 0x00C
#define REG_MMU_FLUSH_ENTRY 0x010
#define REG_PT_BASE_ADDR 0x014
#define REG_INT_STATUS 0x018
#define REG_INT_CLEAR 0x01C

#define REG_PAGE_FAULT_ADDR 0x024
#define REG_AW_FAULT_ADDR 0x028
#define REG_AR_FAULT_ADDR 0x02C
#define REG_DEFAULT_SLAVE_ADDR 0x030

/* v5.x registers */
#define REG_V5_PT_BASE_PFN 0x00C
#define REG_V5_MMU_FLUSH_ALL 0x010
#define REG_V5_MMU_FLUSH_ENTRY 0x014
#define REG_V5_MMU_FLUSH_RANGE 0x018
#define REG_V5_MMU_FLUSH_START 0x020
#define REG_V5_MMU_FLUSH_END 0x024
#define REG_V5_INT_STATUS 0x060
#define REG_V5_INT_CLEAR 0x064
#define REG_V5_FAULT_AR_VA 0x070
#define REG_V5_FAULT_AW_VA 0x080

#define has_sysmmu(dev) (dev->archdata.iommu != NULL)

static struct device *dma_dev;
static struct kmem_cache *lv2table_kmem_cache;
static sysmmu_pte_t *zero_lv2_table;
#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))
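
/*
 * Descriptive note (added for clarity): zero_lv2_table is, as its name and
 * the v3.3 workaround comments below suggest, a shared lv2 table containing
 * only fault entries. Unmapped lv1 slots are set to ZERO_LV2LINK, a valid
 * link to this table, instead of a plain fault entry, so the v3.3 FLPD
 * cache never caches a faulty first-level entry (see alloc_lv2entry()).
 */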

static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
	return pgtable + lv1ent_offset(iova);
}

static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
	return (sysmmu_pte_t *)phys_to_virt(
				lv2table_base(sent)) + lv2ent_offset(iova);
}

/*
 * IOMMU fault information register
 */
struct sysmmu_fault_info {
	unsigned int bit;	/* bit number in STATUS register */
	unsigned short addr_reg; /* register to read VA fault address */
	const char *name;	/* human readable fault name */
	unsigned int type;	/* fault type for report_iommu_fault */
};

static const struct sysmmu_fault_info sysmmu_faults[] = {
	{ 0, REG_PAGE_FAULT_ADDR, "PAGE", IOMMU_FAULT_READ },
	{ 1, REG_AR_FAULT_ADDR, "AR MULTI-HIT", IOMMU_FAULT_READ },
	{ 2, REG_AW_FAULT_ADDR, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
	{ 3, REG_DEFAULT_SLAVE_ADDR, "BUS ERROR", IOMMU_FAULT_READ },
	{ 4, REG_AR_FAULT_ADDR, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
	{ 5, REG_AR_FAULT_ADDR, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
	{ 6, REG_AW_FAULT_ADDR, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
	{ 7, REG_AW_FAULT_ADDR, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
};

static const struct sysmmu_fault_info sysmmu_v5_faults[] = {
	{ 0, REG_V5_FAULT_AR_VA, "AR PTW", IOMMU_FAULT_READ },
	{ 1, REG_V5_FAULT_AR_VA, "AR PAGE", IOMMU_FAULT_READ },
	{ 2, REG_V5_FAULT_AR_VA, "AR MULTI-HIT", IOMMU_FAULT_READ },
	{ 3, REG_V5_FAULT_AR_VA, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
	{ 4, REG_V5_FAULT_AR_VA, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
	{ 16, REG_V5_FAULT_AW_VA, "AW PTW", IOMMU_FAULT_WRITE },
	{ 17, REG_V5_FAULT_AW_VA, "AW PAGE", IOMMU_FAULT_WRITE },
	{ 18, REG_V5_FAULT_AW_VA, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
	{ 19, REG_V5_FAULT_AW_VA, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
	{ 20, REG_V5_FAULT_AW_VA, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
};

/*
 * This structure is attached to dev.archdata.iommu of the master device
 * on device add; it contains a list of the SYSMMU controllers defined by
 * the device tree, which are bound to the given master device. It is
 * usually referenced by the 'owner' pointer.
 */
struct exynos_iommu_owner {
	struct list_head controllers;	/* list of sysmmu_drvdata.owner_node */
	struct iommu_domain *domain;	/* domain this device is attached to */
	struct mutex rpm_lock;		/* for runtime pm of all sysmmus */
};

/*
 * This structure is an exynos specific generalization of struct iommu_domain.
 * It contains the list of SYSMMU controllers from all master devices which
 * have been attached to this domain, as well as the page tables of the IO
 * address space defined by it. It is usually referenced by the 'domain'
 * pointer.
 */
struct exynos_iommu_domain {
	struct list_head clients; /* list of sysmmu_drvdata.domain_node */
	sysmmu_pte_t *pgtable;	/* lv1 page table, 16KB */
	short *lv2entcnt;	/* free lv2 entry counter for each section */
	spinlock_t lock;	/* lock for modifying list of clients */
	spinlock_t pgtablelock;	/* lock for modifying page table @ pgtable */
	struct iommu_domain domain; /* generic domain data structure */
};

/*
 * This structure holds all data of a single SYSMMU controller; this includes
 * hw resources like registers and clocks, pointers and list nodes to connect
 * it to all other structures, internal state and parameters read from the
 * device tree. It is usually referenced by the 'data' pointer.
 */
struct sysmmu_drvdata {
	struct device *sysmmu;		/* SYSMMU controller device */
	struct device *master;		/* master device (owner) */
	struct device_link *link;	/* runtime PM link to master */
	void __iomem *sfrbase;		/* our registers */
	struct clk *clk;		/* SYSMMU's clock */
	struct clk *aclk;		/* SYSMMU's aclk clock */
	struct clk *pclk;		/* SYSMMU's pclk clock */
	struct clk *clk_master;		/* master's device clock */
	spinlock_t lock;		/* lock for modifying state */
	bool active;			/* current status */
	struct exynos_iommu_domain *domain; /* domain we belong to */
	struct list_head domain_node;	/* node for domain clients list */
	struct list_head owner_node;	/* node for owner controllers list */
	phys_addr_t pgtable;		/* assigned page table structure */
	unsigned int version;		/* our version */

	struct iommu_device iommu;	/* IOMMU core handle */
};
280
Joerg Roedele1fd1ea2015-03-26 13:43:11 +0100281static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
282{
283 return container_of(dom, struct exynos_iommu_domain, domain);
284}
285
Marek Szyprowski02cdc362016-02-18 15:12:52 +0100286static void sysmmu_unblock(struct sysmmu_drvdata *data)
KyongHo Cho2a965362012-05-12 05:56:09 +0900287{
Marek Szyprowski84bd0422016-02-29 13:42:57 +0100288 writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
KyongHo Cho2a965362012-05-12 05:56:09 +0900289}
290
Marek Szyprowski02cdc362016-02-18 15:12:52 +0100291static bool sysmmu_block(struct sysmmu_drvdata *data)
KyongHo Cho2a965362012-05-12 05:56:09 +0900292{
293 int i = 120;
294
Marek Szyprowski84bd0422016-02-29 13:42:57 +0100295 writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
296 while ((i > 0) && !(readl(data->sfrbase + REG_MMU_STATUS) & 1))
KyongHo Cho2a965362012-05-12 05:56:09 +0900297 --i;
298
Marek Szyprowski84bd0422016-02-29 13:42:57 +0100299 if (!(readl(data->sfrbase + REG_MMU_STATUS) & 1)) {
Marek Szyprowski02cdc362016-02-18 15:12:52 +0100300 sysmmu_unblock(data);
KyongHo Cho2a965362012-05-12 05:56:09 +0900301 return false;
302 }
303
304 return true;
305}
306
Marek Szyprowski02cdc362016-02-18 15:12:52 +0100307static void __sysmmu_tlb_invalidate(struct sysmmu_drvdata *data)
KyongHo Cho2a965362012-05-12 05:56:09 +0900308{
Marek Szyprowski740a01e2016-02-18 15:12:58 +0100309 if (MMU_MAJ_VER(data->version) < 5)
Marek Szyprowski84bd0422016-02-29 13:42:57 +0100310 writel(0x1, data->sfrbase + REG_MMU_FLUSH);
Marek Szyprowski740a01e2016-02-18 15:12:58 +0100311 else
Marek Szyprowski84bd0422016-02-29 13:42:57 +0100312 writel(0x1, data->sfrbase + REG_V5_MMU_FLUSH_ALL);
KyongHo Cho2a965362012-05-12 05:56:09 +0900313}
314
Marek Szyprowski02cdc362016-02-18 15:12:52 +0100315static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
Cho KyongHod09d78f2014-05-12 11:44:58 +0530316 sysmmu_iova_t iova, unsigned int num_inv)
KyongHo Cho2a965362012-05-12 05:56:09 +0900317{
Cho KyongHo3ad6b7f2014-05-12 11:44:49 +0530318 unsigned int i;
Sachin Kamat365409d2014-05-22 09:50:56 +0530319
Marek Szyprowskid5bf7392017-03-24 10:19:01 +0100320 if (MMU_MAJ_VER(data->version) < 5) {
321 for (i = 0; i < num_inv; i++) {
Marek Szyprowski84bd0422016-02-29 13:42:57 +0100322 writel((iova & SPAGE_MASK) | 1,
Marek Szyprowski740a01e2016-02-18 15:12:58 +0100323 data->sfrbase + REG_MMU_FLUSH_ENTRY);
Marek Szyprowskid5bf7392017-03-24 10:19:01 +0100324 iova += SPAGE_SIZE;
325 }
326 } else {
327 if (num_inv == 1) {
Marek Szyprowski84bd0422016-02-29 13:42:57 +0100328 writel((iova & SPAGE_MASK) | 1,
Marek Szyprowski740a01e2016-02-18 15:12:58 +0100329 data->sfrbase + REG_V5_MMU_FLUSH_ENTRY);
Marek Szyprowskid5bf7392017-03-24 10:19:01 +0100330 } else {
331 writel((iova & SPAGE_MASK),
332 data->sfrbase + REG_V5_MMU_FLUSH_START);
333 writel((iova & SPAGE_MASK) + (num_inv - 1) * SPAGE_SIZE,
334 data->sfrbase + REG_V5_MMU_FLUSH_END);
335 writel(1, data->sfrbase + REG_V5_MMU_FLUSH_RANGE);
336 }
Cho KyongHo3ad6b7f2014-05-12 11:44:49 +0530337 }
KyongHo Cho2a965362012-05-12 05:56:09 +0900338}
339
Marek Szyprowski02cdc362016-02-18 15:12:52 +0100340static void __sysmmu_set_ptbase(struct sysmmu_drvdata *data, phys_addr_t pgd)
KyongHo Cho2a965362012-05-12 05:56:09 +0900341{
Marek Szyprowski740a01e2016-02-18 15:12:58 +0100342 if (MMU_MAJ_VER(data->version) < 5)
Marek Szyprowski84bd0422016-02-29 13:42:57 +0100343 writel(pgd, data->sfrbase + REG_PT_BASE_ADDR);
Marek Szyprowski740a01e2016-02-18 15:12:58 +0100344 else
Marek Szyprowski84bd0422016-02-29 13:42:57 +0100345 writel(pgd >> PAGE_SHIFT,
Marek Szyprowski740a01e2016-02-18 15:12:58 +0100346 data->sfrbase + REG_V5_PT_BASE_PFN);
KyongHo Cho2a965362012-05-12 05:56:09 +0900347
Marek Szyprowski02cdc362016-02-18 15:12:52 +0100348 __sysmmu_tlb_invalidate(data);
KyongHo Cho2a965362012-05-12 05:56:09 +0900349}
350
Marek Szyprowskifecc49d2016-05-23 11:30:09 +0200351static void __sysmmu_enable_clocks(struct sysmmu_drvdata *data)
352{
353 BUG_ON(clk_prepare_enable(data->clk_master));
354 BUG_ON(clk_prepare_enable(data->clk));
355 BUG_ON(clk_prepare_enable(data->pclk));
356 BUG_ON(clk_prepare_enable(data->aclk));
357}
358
359static void __sysmmu_disable_clocks(struct sysmmu_drvdata *data)
360{
361 clk_disable_unprepare(data->aclk);
362 clk_disable_unprepare(data->pclk);
363 clk_disable_unprepare(data->clk);
364 clk_disable_unprepare(data->clk_master);
365}
366
Marek Szyprowski850d3132016-02-18 15:12:56 +0100367static void __sysmmu_get_version(struct sysmmu_drvdata *data)
368{
369 u32 ver;
370
Marek Szyprowskifecc49d2016-05-23 11:30:09 +0200371 __sysmmu_enable_clocks(data);
Marek Szyprowski850d3132016-02-18 15:12:56 +0100372
Marek Szyprowski84bd0422016-02-29 13:42:57 +0100373 ver = readl(data->sfrbase + REG_MMU_VERSION);
Marek Szyprowski850d3132016-02-18 15:12:56 +0100374
375 /* controllers on some SoCs don't report proper version */
376 if (ver == 0x80000001u)
377 data->version = MAKE_MMU_VER(1, 0);
378 else
379 data->version = MMU_RAW_VER(ver);
380
381 dev_dbg(data->sysmmu, "hardware version: %d.%d\n",
382 MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version));
383
Marek Szyprowskifecc49d2016-05-23 11:30:09 +0200384 __sysmmu_disable_clocks(data);
Marek Szyprowski850d3132016-02-18 15:12:56 +0100385}
386
Marek Szyprowskid093fc72016-02-18 15:12:53 +0100387static void show_fault_information(struct sysmmu_drvdata *data,
388 const struct sysmmu_fault_info *finfo,
389 sysmmu_iova_t fault_addr)
KyongHo Cho2a965362012-05-12 05:56:09 +0900390{
Cho KyongHod09d78f2014-05-12 11:44:58 +0530391 sysmmu_pte_t *ent;
KyongHo Cho2a965362012-05-12 05:56:09 +0900392
Marek Szyprowskiec5d2412017-01-09 13:03:53 +0100393 dev_err(data->sysmmu, "%s: %s FAULT occurred at %#x\n",
394 dev_name(data->master), finfo->name, fault_addr);
395 dev_dbg(data->sysmmu, "Page table base: %pa\n", &data->pgtable);
Marek Szyprowskid093fc72016-02-18 15:12:53 +0100396 ent = section_entry(phys_to_virt(data->pgtable), fault_addr);
Marek Szyprowskiec5d2412017-01-09 13:03:53 +0100397 dev_dbg(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
KyongHo Cho2a965362012-05-12 05:56:09 +0900398 if (lv1ent_page(ent)) {
399 ent = page_entry(ent, fault_addr);
Marek Szyprowskiec5d2412017-01-09 13:03:53 +0100400 dev_dbg(data->sysmmu, "\t Lv2 entry: %#x\n", *ent);
KyongHo Cho2a965362012-05-12 05:56:09 +0900401 }
KyongHo Cho2a965362012-05-12 05:56:09 +0900402}
403
static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
	/* SYSMMU is in blocked state when an interrupt occurs. */
	struct sysmmu_drvdata *data = dev_id;
	const struct sysmmu_fault_info *finfo;
	unsigned int i, n, itype;
	sysmmu_iova_t fault_addr = -1;
	unsigned short reg_status, reg_clear;
	int ret = -ENOSYS;

	WARN_ON(!data->active);

	if (MMU_MAJ_VER(data->version) < 5) {
		reg_status = REG_INT_STATUS;
		reg_clear = REG_INT_CLEAR;
		finfo = sysmmu_faults;
		n = ARRAY_SIZE(sysmmu_faults);
	} else {
		reg_status = REG_V5_INT_STATUS;
		reg_clear = REG_V5_INT_CLEAR;
		finfo = sysmmu_v5_faults;
		n = ARRAY_SIZE(sysmmu_v5_faults);
	}

	spin_lock(&data->lock);

	clk_enable(data->clk_master);

	itype = __ffs(readl(data->sfrbase + reg_status));
	for (i = 0; i < n; i++, finfo++)
		if (finfo->bit == itype)
			break;
	/* unknown/unsupported fault */
	BUG_ON(i == n);

	/* print debug message */
	fault_addr = readl(data->sfrbase + finfo->addr_reg);
	show_fault_information(data, finfo, fault_addr);

	if (data->domain)
		ret = report_iommu_fault(&data->domain->domain,
					data->master, fault_addr, finfo->type);
	/* fault is not recovered by fault handler */
	BUG_ON(ret != 0);

	writel(1 << itype, data->sfrbase + reg_clear);

	sysmmu_unblock(data);

	clk_disable(data->clk_master);

	spin_unlock(&data->lock);

	return IRQ_HANDLED;
}

static void __sysmmu_disable(struct sysmmu_drvdata *data)
{
	unsigned long flags;

	clk_enable(data->clk_master);

	spin_lock_irqsave(&data->lock, flags);
	writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
	writel(0, data->sfrbase + REG_MMU_CFG);
	data->active = false;
	spin_unlock_irqrestore(&data->lock, flags);

	__sysmmu_disable_clocks(data);
}

static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
	unsigned int cfg;

	if (data->version <= MAKE_MMU_VER(3, 1))
		cfg = CFG_LRU | CFG_QOS(15);
	else if (data->version <= MAKE_MMU_VER(3, 2))
		cfg = CFG_LRU | CFG_QOS(15) | CFG_FLPDCACHE | CFG_SYSSEL;
	else
		cfg = CFG_QOS(15) | CFG_FLPDCACHE | CFG_ACGEN;

	cfg |= CFG_EAP; /* enable access protection bits check */

	writel(cfg, data->sfrbase + REG_MMU_CFG);
}

static void __sysmmu_enable(struct sysmmu_drvdata *data)
{
	unsigned long flags;

	__sysmmu_enable_clocks(data);

	spin_lock_irqsave(&data->lock, flags);
	writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
	__sysmmu_init_config(data);
	__sysmmu_set_ptbase(data, data->pgtable);
	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
	data->active = true;
	spin_unlock_irqrestore(&data->lock, flags);

	/*
	 * The SYSMMU driver keeps the master's clock enabled only for a
	 * short time, while accessing the registers. For performing address
	 * translation during DMA transactions it relies on the client
	 * driver to enable it.
	 */
	clk_disable(data->clk_master);
}

static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
					    sysmmu_iova_t iova)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (data->active && data->version >= MAKE_MMU_VER(3, 3)) {
		clk_enable(data->clk_master);
		if (sysmmu_block(data)) {
			if (data->version >= MAKE_MMU_VER(5, 0))
				__sysmmu_tlb_invalidate(data);
			else
				__sysmmu_tlb_invalidate_entry(data, iova, 1);
			sysmmu_unblock(data);
		}
		clk_disable(data->clk_master);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
					sysmmu_iova_t iova, size_t size)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (data->active) {
		unsigned int num_inv = 1;

		clk_enable(data->clk_master);

		/*
		 * L2TLB invalidation required
		 * 4KB page: 1 invalidation
		 * 64KB page: 16 invalidations
		 * 1MB page: 64 invalidations
		 * because it is a set-associative TLB
		 * with 8 ways and 64 sets.
		 * A 1MB page can be cached in any of the sets.
		 * A 64KB page can be in one of 16 consecutive sets.
		 */
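		/*
		 * Worked example (illustrative, added for clarity): on v2
		 * hardware, unmapping one 64KiB large page gives
		 * num_inv = min(SZ_64K / PAGE_SIZE, 64) = 16 single-entry
		 * flushes below; a 1MiB section caps at the full 64 sets.
		 */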
		if (MMU_MAJ_VER(data->version) == 2)
			num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);

		if (sysmmu_block(data)) {
			__sysmmu_tlb_invalidate_entry(data, iova, num_inv);
			sysmmu_unblock(data);
		}
		clk_disable(data->clk_master);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

static const struct iommu_ops exynos_iommu_ops;

static int exynos_sysmmu_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct device *dev = &pdev->dev;
	struct sysmmu_drvdata *data;
	struct resource *res;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->sfrbase = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->sfrbase))
		return PTR_ERR(data->sfrbase);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0)
		return irq;

	ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
			       dev_name(dev), data);
	if (ret) {
		dev_err(dev, "Unable to register handler of irq %d\n", irq);
		return ret;
	}

	data->clk = devm_clk_get(dev, "sysmmu");
	if (PTR_ERR(data->clk) == -ENOENT)
		data->clk = NULL;
	else if (IS_ERR(data->clk))
		return PTR_ERR(data->clk);

	data->aclk = devm_clk_get(dev, "aclk");
	if (PTR_ERR(data->aclk) == -ENOENT)
		data->aclk = NULL;
	else if (IS_ERR(data->aclk))
		return PTR_ERR(data->aclk);

	data->pclk = devm_clk_get(dev, "pclk");
	if (PTR_ERR(data->pclk) == -ENOENT)
		data->pclk = NULL;
	else if (IS_ERR(data->pclk))
		return PTR_ERR(data->pclk);

	if (!data->clk && (!data->aclk || !data->pclk)) {
		dev_err(dev, "Failed to get device clock(s)!\n");
		return -ENOSYS;
	}

	data->clk_master = devm_clk_get(dev, "master");
	if (PTR_ERR(data->clk_master) == -ENOENT)
		data->clk_master = NULL;
	else if (IS_ERR(data->clk_master))
		return PTR_ERR(data->clk_master);

	data->sysmmu = dev;
	spin_lock_init(&data->lock);

	ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
				     dev_name(data->sysmmu));
	if (ret)
		return ret;

	iommu_device_set_ops(&data->iommu, &exynos_iommu_ops);
	iommu_device_set_fwnode(&data->iommu, &dev->of_node->fwnode);

	ret = iommu_device_register(&data->iommu);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, data);

	__sysmmu_get_version(data);
	if (PG_ENT_SHIFT < 0) {
		if (MMU_MAJ_VER(data->version) < 5) {
			PG_ENT_SHIFT = SYSMMU_PG_ENT_SHIFT;
			LV1_PROT = SYSMMU_LV1_PROT;
			LV2_PROT = SYSMMU_LV2_PROT;
		} else {
			PG_ENT_SHIFT = SYSMMU_V5_PG_ENT_SHIFT;
			LV1_PROT = SYSMMU_V5_LV1_PROT;
			LV2_PROT = SYSMMU_V5_LV2_PROT;
		}
	}

	/*
	 * use the first registered sysmmu device for performing
	 * dma mapping operations on iommu page tables (cpu cache flush)
	 */
	if (!dma_dev)
		dma_dev = &pdev->dev;

	pm_runtime_enable(dev);

	return 0;
}

static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
	struct device *master = data->master;

	if (master) {
		struct exynos_iommu_owner *owner = master->archdata.iommu;

		mutex_lock(&owner->rpm_lock);
		if (data->domain) {
			dev_dbg(data->sysmmu, "saving state\n");
			__sysmmu_disable(data);
		}
		mutex_unlock(&owner->rpm_lock);
	}
	return 0;
}

static int __maybe_unused exynos_sysmmu_resume(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
	struct device *master = data->master;

	if (master) {
		struct exynos_iommu_owner *owner = master->archdata.iommu;

		mutex_lock(&owner->rpm_lock);
		if (data->domain) {
			dev_dbg(data->sysmmu, "restoring state\n");
			__sysmmu_enable(data);
		}
		mutex_unlock(&owner->rpm_lock);
	}
	return 0;
}

static const struct dev_pm_ops sysmmu_pm_ops = {
	SET_RUNTIME_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

static const struct of_device_id sysmmu_of_match[] = {
	{ .compatible = "samsung,exynos-sysmmu", },
	{ },
};

static struct platform_driver exynos_sysmmu_driver __refdata = {
	.probe	= exynos_sysmmu_probe,
	.driver	= {
		.name		= "exynos-sysmmu",
		.of_match_table	= sysmmu_of_match,
		.pm		= &sysmmu_pm_ops,
		.suppress_bind_attrs = true,
	}
};

static inline void update_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
{
	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent),
				DMA_TO_DEVICE);
	*ent = cpu_to_le32(val);
	dma_sync_single_for_device(dma_dev, virt_to_phys(ent), sizeof(*ent),
				   DMA_TO_DEVICE);
}

static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
{
	struct exynos_iommu_domain *domain;
	dma_addr_t handle;
	int i;

	/* Check if correct PTE offsets are initialized */
	BUG_ON(PG_ENT_SHIFT < 0 || !dma_dev);

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA) {
		if (iommu_get_dma_cookie(&domain->domain) != 0)
			goto err_pgtable;
	} else if (type != IOMMU_DOMAIN_UNMANAGED) {
		goto err_pgtable;
	}

	domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!domain->pgtable)
		goto err_dma_cookie;

	domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!domain->lv2entcnt)
		goto err_counter;

	/* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
	for (i = 0; i < NUM_LV1ENTRIES; i++)
		domain->pgtable[i] = ZERO_LV2LINK;

	handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE,
				DMA_TO_DEVICE);
	/* For mapping page table entries we rely on dma == phys */
	BUG_ON(handle != virt_to_phys(domain->pgtable));
	if (dma_mapping_error(dma_dev, handle))
		goto err_lv2ent;

	spin_lock_init(&domain->lock);
	spin_lock_init(&domain->pgtablelock);
	INIT_LIST_HEAD(&domain->clients);

	domain->domain.geometry.aperture_start = 0;
	domain->domain.geometry.aperture_end = ~0UL;
	domain->domain.geometry.force_aperture = true;

	return &domain->domain;

err_lv2ent:
	free_pages((unsigned long)domain->lv2entcnt, 1);
err_counter:
	free_pages((unsigned long)domain->pgtable, 2);
err_dma_cookie:
	if (type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(&domain->domain);
err_pgtable:
	kfree(domain);
	return NULL;
}

static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;
	int i;

	WARN_ON(!list_empty(&domain->clients));

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		spin_lock(&data->lock);
		__sysmmu_disable(data);
		data->pgtable = 0;
		data->domain = NULL;
		list_del_init(&data->domain_node);
		spin_unlock(&data->lock);
	}

	spin_unlock_irqrestore(&domain->lock, flags);

	if (iommu_domain->type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(iommu_domain);

	dma_unmap_single(dma_dev, virt_to_phys(domain->pgtable), LV1TABLE_SIZE,
			 DMA_TO_DEVICE);

	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (lv1ent_page(domain->pgtable + i)) {
			phys_addr_t base = lv2table_base(domain->pgtable + i);

			dma_unmap_single(dma_dev, base, LV2TABLE_SIZE,
					 DMA_TO_DEVICE);
			kmem_cache_free(lv2table_kmem_cache,
					phys_to_virt(base));
		}

	free_pages((unsigned long)domain->pgtable, 2);
	free_pages((unsigned long)domain->lv2entcnt, 1);
	kfree(domain);
}

static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
				       struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;

	if (!has_sysmmu(dev) || owner->domain != iommu_domain)
		return;

	mutex_lock(&owner->rpm_lock);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		pm_runtime_get_noresume(data->sysmmu);
		if (pm_runtime_active(data->sysmmu))
			__sysmmu_disable(data);
		pm_runtime_put(data->sysmmu);
	}

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		spin_lock(&data->lock);
		data->pgtable = 0;
		data->domain = NULL;
		list_del_init(&data->domain_node);
		spin_unlock(&data->lock);
	}
	owner->domain = NULL;
	spin_unlock_irqrestore(&domain->lock, flags);

	mutex_unlock(&owner->rpm_lock);

	dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n", __func__,
		&pagetable);
}

static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
				      struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct sysmmu_drvdata *data;
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	unsigned long flags;

	if (!has_sysmmu(dev))
		return -ENODEV;

	if (owner->domain)
		exynos_iommu_detach_device(owner->domain, dev);

	mutex_lock(&owner->rpm_lock);

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry(data, &owner->controllers, owner_node) {
		spin_lock(&data->lock);
		data->pgtable = pagetable;
		data->domain = domain;
		list_add_tail(&data->domain_node, &domain->clients);
		spin_unlock(&data->lock);
	}
	owner->domain = iommu_domain;
	spin_unlock_irqrestore(&domain->lock, flags);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		pm_runtime_get_noresume(data->sysmmu);
		if (pm_runtime_active(data->sysmmu))
			__sysmmu_enable(data);
		pm_runtime_put(data->sysmmu);
	}

	mutex_unlock(&owner->rpm_lock);

	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa\n", __func__,
		&pagetable);

	return 0;
}

static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
		sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova);
		return ERR_PTR(-EADDRINUSE);
	}

	if (lv1ent_fault(sent)) {
		dma_addr_t handle;
		sysmmu_pte_t *pent;
		bool need_flush_flpd_cache = lv1ent_zero(sent);

		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
		BUG_ON((uintptr_t)pent & (LV2TABLE_SIZE - 1));
		if (!pent)
			return ERR_PTR(-ENOMEM);

		update_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
		kmemleak_ignore(pent);
		*pgcounter = NUM_LV2ENTRIES;
		handle = dma_map_single(dma_dev, pent, LV2TABLE_SIZE,
					DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, handle)) {
			kmem_cache_free(lv2table_kmem_cache, pent);
			return ERR_PTR(-EADDRINUSE);
		}

		/*
		 * If the pre-fetched SLPD is the faulty SLPD in zero_l2_table,
		 * the FLPD cache may cache the address of zero_l2_table. This
		 * function replaces the zero_l2_table with a new L2 page table
		 * to write valid mappings.
		 * Accessing the valid area may cause a page fault since the
		 * FLPD cache may still cache zero_l2_table for the valid area
		 * instead of the new L2 page table that has the mapping
		 * information of the valid area.
		 * Thus any replacement of zero_l2_table with another valid L2
		 * page table must involve FLPD cache invalidation for System
		 * MMU v3.3.
		 * FLPD cache invalidation is performed with TLB invalidation
		 * by VPN without blocking. It is safe to invalidate the TLB
		 * without blocking because the target address of the TLB
		 * invalidation is not currently mapped.
		 */
		if (need_flush_flpd_cache) {
			struct sysmmu_drvdata *data;

			spin_lock(&domain->lock);
			list_for_each_entry(data, &domain->clients, domain_node)
				sysmmu_tlb_invalidate_flpdcache(data, iova);
			spin_unlock(&domain->lock);
		}
	}

	return page_entry(sent, iova);
}

static int lv1set_section(struct exynos_iommu_domain *domain,
			  sysmmu_pte_t *sent, sysmmu_iova_t iova,
			  phys_addr_t paddr, int prot, short *pgcnt)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
			iova);
		return -EADDRINUSE;
	}

	if (lv1ent_page(sent)) {
		if (*pgcnt != NUM_LV2ENTRIES) {
			WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
				iova);
			return -EADDRINUSE;
		}

		kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
		*pgcnt = 0;
	}

	update_pte(sent, mk_lv1ent_sect(paddr, prot));

	spin_lock(&domain->lock);
	if (lv1ent_page_zero(sent)) {
		struct sysmmu_drvdata *data;
		/*
		 * Flushing FLPD cache in System MMU v3.3 that may cache a FLPD
		 * entry by speculative prefetch of SLPD which has no mapping.
		 */
		list_for_each_entry(data, &domain->clients, domain_node)
			sysmmu_tlb_invalidate_flpdcache(data, iova);
	}
	spin_unlock(&domain->lock);

	return 0;
}

static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
		       int prot, short *pgcnt)
{
	if (size == SPAGE_SIZE) {
		if (WARN_ON(!lv2ent_fault(pent)))
			return -EADDRINUSE;

		update_pte(pent, mk_lv2ent_spage(paddr, prot));
		*pgcnt -= 1;
	} else { /* size == LPAGE_SIZE */
		int i;
		dma_addr_t pent_base = virt_to_phys(pent);

		dma_sync_single_for_cpu(dma_dev, pent_base,
					sizeof(*pent) * SPAGES_PER_LPAGE,
					DMA_TO_DEVICE);
		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
			if (WARN_ON(!lv2ent_fault(pent))) {
				if (i > 0)
					memset(pent - i, 0, sizeof(*pent) * i);
				return -EADDRINUSE;
			}

			*pent = mk_lv2ent_lpage(paddr, prot);
		}
		dma_sync_single_for_device(dma_dev, pent_base,
					   sizeof(*pent) * SPAGES_PER_LPAGE,
					   DMA_TO_DEVICE);
		*pgcnt -= SPAGES_PER_LPAGE;
	}

	return 0;
}
1047
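/*
 * Worked example for lv2set_page() (illustrative address only): with
 * SPAGE_ORDER = 12 and LPAGE_ORDER = 16, SPAGES_PER_LPAGE is 16, so a
 * single 64KiB large-page mapping at IOVA 0x40010000 fills the sixteen
 * consecutive lv2 entries covering 0x40010000..0x4001ffff with the same
 * physical base and drops the table's free-entry count by 16. The
 * rollback memset() above undoes a partially written run if any of
 * those sixteen slots turns out to be in use.
 */
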
/*
 * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
 *
 * System MMU v3.x has advanced logic to improve address translation
 * performance: it caches additional page table entries on a page table
 * walk. However, this logic has a bug: fault (invalid) page table entries
 * are cached as well, and the System MMU keeps reporting a page fault
 * whenever a cached fault entry is hit, even after that entry has been
 * updated to a valid one in memory. To prevent faulty page table entries
 * from being cached before they are updated to valid entries, the virtual
 * memory manager must apply the workarounds described below.
 *
 * Any two consecutive I/O virtual address regions must be separated by a
 * hole of at least 128KiB to prevent misbehavior of System MMU 3.x
 * (workaround for an h/w bug).
 *
 * Precisely, the start address of any I/O virtual region must be aligned
 * to the following sizes for System MMU v3.1 and v3.2:
 * System MMU v3.1: 128KiB
 * System MMU v3.2: 256KiB
 *
 * Because System MMU v3.3 caches page table entries more aggressively, it
 * needs further workarounds:
 * - Any two consecutive I/O virtual regions must have a hole of size
 *   larger than or equal to 128KiB.
 * - The start address of an I/O virtual region must be aligned by 128KiB.
 * An illustrative allocator sketch follows this comment.
 */
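/*
 * A minimal sketch (editorial illustration, not part of this driver) of
 * an IOVA placement policy satisfying the v3.3 workaround above. The
 * helper name is hypothetical; SZ_128K and ALIGN() are the generic
 * kernel definitions:
 *
 *	// Place a region so that its start is 128KiB-aligned and a hole
 *	// of at least 128KiB separates it from the previously allocated
 *	// region ending at 'prev_end'.
 *	static dma_addr_t sysmmu_v33_place_region(dma_addr_t prev_end,
 *						  size_t size)
 *	{
 *		return ALIGN(prev_end + SZ_128K, SZ_128K);
 *	}
 *
 * Starting at ALIGN(prev_end + SZ_128K, SZ_128K) guarantees both rules:
 * the gap to prev_end is >= 128KiB and the start is 128KiB-aligned.
 */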
static int exynos_iommu_map(struct iommu_domain *iommu_domain,
			    unsigned long l_iova, phys_addr_t paddr, size_t size,
			    int prot, gfp_t gfp)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	unsigned long flags;
	int ret = -ENOMEM;

	BUG_ON(domain->pgtable == NULL);
	prot &= SYSMMU_SUPPORTED_PROT_BITS;

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (size == SECT_SIZE) {
		ret = lv1set_section(domain, entry, iova, paddr, prot,
				     &domain->lv2entcnt[lv1ent_offset(iova)]);
	} else {
		sysmmu_pte_t *pent;

		pent = alloc_lv2entry(domain, entry, iova,
				      &domain->lv2entcnt[lv1ent_offset(iova)]);

		if (IS_ERR(pent))
			ret = PTR_ERR(pent);
		else
			ret = lv2set_page(pent, paddr, size, prot,
					  &domain->lv2entcnt[lv1ent_offset(iova)]);
	}

	if (ret)
		pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
			__func__, ret, size, iova);

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return ret;
}

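/*
 * Usage note (editorial): callers reach this function through the
 * generic IOMMU API, never directly. The core splits a request
 * according to pgsize_bitmap, so, roughly (addresses hypothetical,
 * using the contemporary five-argument iommu_map()):
 *
 *	iommu_map(dom, 0x20000000, phys, SZ_1M + SZ_64K + SZ_4K,
 *		  IOMMU_READ | IOMMU_WRITE);
 *
 * results in three calls into exynos_iommu_map(), one per supported
 * size (1MiB section, 64KiB large page, 4KiB small page), provided the
 * addresses stay suitably aligned at each step.
 */
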
static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain,
					      sysmmu_iova_t iova, size_t size)
{
	struct sysmmu_drvdata *data;
	unsigned long flags;

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry(data, &domain->clients, domain_node)
		sysmmu_tlb_invalidate_entry(data, iova, size);

	spin_unlock_irqrestore(&domain->lock, flags);
}

static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
				 unsigned long l_iova, size_t size,
				 struct iommu_iotlb_gather *gather)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	sysmmu_pte_t *ent;
	size_t err_pgsize;
	unsigned long flags;

	BUG_ON(domain->pgtable == NULL);

	spin_lock_irqsave(&domain->pgtablelock, flags);

	ent = section_entry(domain->pgtable, iova);

	if (lv1ent_section(ent)) {
		if (WARN_ON(size < SECT_SIZE)) {
			err_pgsize = SECT_SIZE;
			goto err;
		}

		/* workaround for h/w bug in System MMU v3.3 */
		update_pte(ent, ZERO_LV2LINK);
		size = SECT_SIZE;
		goto done;
	}

	if (unlikely(lv1ent_fault(ent))) {
		if (size > SECT_SIZE)
			size = SECT_SIZE;
		goto done;
	}

	/* lv1ent_page(ent) == true here */

	ent = page_entry(ent, iova);

	if (unlikely(lv2ent_fault(ent))) {
		size = SPAGE_SIZE;
		goto done;
	}

	if (lv2ent_small(ent)) {
		update_pte(ent, 0);
		size = SPAGE_SIZE;
		domain->lv2entcnt[lv1ent_offset(iova)] += 1;
		goto done;
	}

	/* lv2ent_large(ent) == true here */
	if (WARN_ON(size < LPAGE_SIZE)) {
		err_pgsize = LPAGE_SIZE;
		goto err;
	}

	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent),
				sizeof(*ent) * SPAGES_PER_LPAGE,
				DMA_TO_DEVICE);
	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
	dma_sync_single_for_device(dma_dev, virt_to_phys(ent),
				   sizeof(*ent) * SPAGES_PER_LPAGE,
				   DMA_TO_DEVICE);
	size = LPAGE_SIZE;
	domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	exynos_iommu_tlb_invalidate_entry(domain, iova, size);

	return size;
err:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
		__func__, size, iova, err_pgsize);

	return 0;
}

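/*
 * Unmap semantics (editorial note): the return value is the size that
 * was actually unmapped, and 0 signals an error to the IOMMU core. For
 * example, asking to unmap only 4KiB out of a live 1MiB section trips
 * the WARN_ON(size < SECT_SIZE) path above and returns 0, because a
 * section cannot be split in place. Replacing a section entry with
 * ZERO_LV2LINK rather than plain 0 is part of the System MMU v3.3 FLPD
 * workaround described earlier.
 */
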
static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
					     dma_addr_t iova)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	unsigned long flags;
	phys_addr_t phys = 0;

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (lv1ent_section(entry)) {
		phys = section_phys(entry) + section_offs(iova);
	} else if (lv1ent_page(entry)) {
		entry = page_entry(entry, iova);

		if (lv2ent_large(entry))
			phys = lpage_phys(entry) + lpage_offs(iova);
		else if (lv2ent_small(entry))
			phys = spage_phys(entry) + spage_offs(iova);
	}

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return phys;
}

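/*
 * Worked translation example (illustrative address): for a 4KiB small
 * page mapped at IOVA 0x12345678, with SECT_ORDER = 20 and
 * SPAGE_ORDER = 12 the lookup above decomposes as:
 *	lv1 index = iova >> 20          = 0x123
 *	lv2 index = (iova >> 12) & 0xff = 0x45
 *	offset    = iova & 0xfff        = 0x678
 * so the result is spage_phys(lv2 entry) + 0x678. Section and
 * large-page lookups follow the same pattern with 1MiB and 64KiB
 * offsets respectively.
 */
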
static int exynos_iommu_add_device(struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct sysmmu_drvdata *data;
	struct iommu_group *group;

	if (!has_sysmmu(dev))
		return -ENODEV;

	group = iommu_group_get_for_dev(dev);

	if (IS_ERR(group))
		return PTR_ERR(group);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		/*
		 * SYSMMU will be runtime activated via device link
		 * (dependency) to its master device, so there are no
		 * direct calls to pm_runtime_get/put in this driver.
		 */
		data->link = device_link_add(dev, data->sysmmu,
					     DL_FLAG_STATELESS |
					     DL_FLAG_PM_RUNTIME);
	}
	iommu_group_put(group);

	return 0;
}

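/*
 * Editorial note on the link direction above: in device_link_add() the
 * first argument is the consumer and the second the supplier, so the
 * master device consumes its SYSMMU. With DL_FLAG_PM_RUNTIME, runtime-
 * resuming the master automatically resumes the SYSMMU first, which is
 * why this driver needs no explicit pm_runtime_get()/put() calls of its
 * own.
 */
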
static void exynos_iommu_remove_device(struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct sysmmu_drvdata *data;

	if (!has_sysmmu(dev))
		return;

	if (owner->domain) {
		struct iommu_group *group = iommu_group_get(dev);

		if (group) {
			WARN_ON(owner->domain !=
				iommu_group_default_domain(group));
			exynos_iommu_detach_device(owner->domain, dev);
			iommu_group_put(group);
		}
	}
	iommu_group_remove_device(dev);

	list_for_each_entry(data, &owner->controllers, owner_node)
		device_link_del(data->link);
}

static int exynos_iommu_of_xlate(struct device *dev,
				 struct of_phandle_args *spec)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct platform_device *sysmmu = of_find_device_by_node(spec->np);
	struct sysmmu_drvdata *data, *entry;

	if (!sysmmu)
		return -ENODEV;

	data = platform_get_drvdata(sysmmu);
	if (!data)
		return -ENODEV;

	if (!owner) {
		owner = kzalloc(sizeof(*owner), GFP_KERNEL);
		if (!owner)
			return -ENOMEM;

		INIT_LIST_HEAD(&owner->controllers);
		mutex_init(&owner->rpm_lock);
		dev->archdata.iommu = owner;
	}

	list_for_each_entry(entry, &owner->controllers, owner_node)
		if (entry == data)
			return 0;

	list_add_tail(&data->owner_node, &owner->controllers);
	data->master = dev;

	return 0;
}

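/*
 * Illustrative device-tree wiring (labels and addresses hypothetical;
 * the compatible string and #iommu-cells value follow the Samsung
 * SYSMMU binding): each master references its System MMU through an
 * "iommus" phandle, which the OF core resolves by calling
 * exynos_iommu_of_xlate() once per referenced controller:
 *
 *	sysmmu_fimd: sysmmu@14640000 {
 *		compatible = "samsung,exynos-sysmmu";
 *		#iommu-cells = <0>;
 *	};
 *
 *	fimd@14400000 {
 *		iommus = <&sysmmu_fimd>;
 *	};
 */
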
static const struct iommu_ops exynos_iommu_ops = {
	.domain_alloc = exynos_iommu_domain_alloc,
	.domain_free = exynos_iommu_domain_free,
	.attach_dev = exynos_iommu_attach_device,
	.detach_dev = exynos_iommu_detach_device,
	.map = exynos_iommu_map,
	.unmap = exynos_iommu_unmap,
	.iova_to_phys = exynos_iommu_iova_to_phys,
	.device_group = generic_device_group,
	.add_device = exynos_iommu_add_device,
	.remove_device = exynos_iommu_remove_device,
	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
	.of_xlate = exynos_iommu_of_xlate,
};

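/*
 * Editorial note: pgsize_bitmap above evaluates to
 * 0x100000 | 0x10000 | 0x1000 = 0x111000, i.e. exactly the three page
 * sizes this table format supports. The IOMMU core consults it to chop
 * every map/unmap request into section-, large-page- and small-page-
 * sized pieces before calling into this driver.
 */
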
static int __init exynos_iommu_init(void)
{
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, sysmmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
				LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
	if (!lv2table_kmem_cache) {
		pr_err("%s: Failed to create kmem cache\n", __func__);
		return -ENOMEM;
	}

	ret = platform_driver_register(&exynos_sysmmu_driver);
	if (ret) {
		pr_err("%s: Failed to register driver\n", __func__);
		goto err_reg_driver;
	}

	zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
	if (zero_lv2_table == NULL) {
		pr_err("%s: Failed to allocate zero level2 page table\n",
			__func__);
		ret = -ENOMEM;
		goto err_zero_lv2;
	}

	ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
	if (ret) {
		pr_err("%s: Failed to register exynos-iommu driver.\n",
			__func__);
		goto err_set_iommu;
	}

	return 0;
err_set_iommu:
	kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
err_zero_lv2:
	platform_driver_unregister(&exynos_sysmmu_driver);
err_reg_driver:
	kmem_cache_destroy(lv2table_kmem_cache);
	return ret;
}
core_initcall(exynos_iommu_init);