/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/** MMU register offsets */
#define RK_MMU_DTE_ADDR		0x00	/* Directory table address */
#define RK_MMU_STATUS		0x04
#define RK_MMU_COMMAND		0x08
#define RK_MMU_PAGE_FAULT_ADDR	0x0C	/* IOVA of last page fault */
#define RK_MMU_ZAP_ONE_LINE	0x10	/* Shootdown one IOTLB entry */
#define RK_MMU_INT_RAWSTAT	0x14	/* IRQ status ignoring mask */
#define RK_MMU_INT_CLEAR	0x18	/* Acknowledge and re-arm irq */
#define RK_MMU_INT_MASK		0x1C	/* IRQ enable */
#define RK_MMU_INT_STATUS	0x20	/* IRQ status after masking */
#define RK_MMU_AUTO_GATING	0x24

#define DTE_ADDR_DUMMY		0xCAFEBABE
#define FORCE_RESET_TIMEOUT	100	/* ms */

/* RK_MMU_STATUS fields */
#define RK_MMU_STATUS_PAGING_ENABLED       BIT(0)
#define RK_MMU_STATUS_PAGE_FAULT_ACTIVE    BIT(1)
#define RK_MMU_STATUS_STALL_ACTIVE         BIT(2)
#define RK_MMU_STATUS_IDLE                 BIT(3)
#define RK_MMU_STATUS_REPLAY_BUFFER_EMPTY  BIT(4)
#define RK_MMU_STATUS_PAGE_FAULT_IS_WRITE  BIT(5)
#define RK_MMU_STATUS_STALL_NOT_ACTIVE     BIT(31)

/* RK_MMU_COMMAND command values */
#define RK_MMU_CMD_ENABLE_PAGING    0  /* Enable memory translation */
#define RK_MMU_CMD_DISABLE_PAGING   1  /* Disable memory translation */
#define RK_MMU_CMD_ENABLE_STALL     2  /* Stall paging to allow other cmds */
#define RK_MMU_CMD_DISABLE_STALL    3  /* Stop stall; re-enables paging */
#define RK_MMU_CMD_ZAP_CACHE        4  /* Shoot down entire IOTLB */
#define RK_MMU_CMD_PAGE_FAULT_DONE  5  /* Clear page fault */
#define RK_MMU_CMD_FORCE_RESET      6  /* Reset all registers */

/* RK_MMU_INT_* register fields */
#define RK_MMU_IRQ_PAGE_FAULT    0x01  /* page fault */
#define RK_MMU_IRQ_BUS_ERROR     0x02  /* bus read error */
#define RK_MMU_IRQ_MASK          (RK_MMU_IRQ_PAGE_FAULT | RK_MMU_IRQ_BUS_ERROR)

#define NUM_DT_ENTRIES 1024
#define NUM_PT_ENTRIES 1024

#define SPAGE_ORDER 12
#define SPAGE_SIZE (1 << SPAGE_ORDER)

/*
 * Support mapping any size that fits in one page table:
 *   4 KiB to 4 MiB
 */
#define RK_IOMMU_PGSIZE_BITMAP 0x007ff000
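/* 0x007ff000 has bits 12..22 set, i.e. every power-of-two size from 4 KiB to 4 MiB */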

#define IOMMU_REG_POLL_COUNT_FAST 1000

struct rk_iommu_domain {
	struct list_head iommus;
	struct platform_device *pdev;
	u32 *dt; /* page directory table */
	dma_addr_t dt_dma;
	spinlock_t iommus_lock; /* lock for iommus list */
	spinlock_t dt_lock; /* lock for modifying page directory table */

	struct iommu_domain domain;
};

struct rk_iommu {
	struct device *dev;
	void __iomem **bases;
	int num_mmu;
	int *irq;
	int num_irq;
	struct iommu_device iommu;
	struct list_head node; /* entry in rk_iommu_domain.iommus */
	struct iommu_domain *domain; /* domain to which iommu is attached */
};

static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma,
				  unsigned int count)
{
	size_t size = count * sizeof(u32); /* count of u32 entry */

	dma_sync_single_for_device(&dom->pdev->dev, dma, size, DMA_TO_DEVICE);
}

static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct rk_iommu_domain, domain);
}

/**
 * Inspired by _wait_for in intel_drv.h
 * This is NOT safe for use in interrupt context.
 *
 * Note that it's important that we check the condition again after having
 * timed out, since the timeout could be due to preemption or similar and
 * we've never had a chance to check the condition before the timeout.
 */
#define rk_wait_for(COND, MS) ({ \
	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1;	\
	int ret__ = 0;							\
	while (!(COND)) {						\
		if (time_after(jiffies, timeout__)) {			\
			ret__ = (COND) ? 0 : -ETIMEDOUT;		\
			break;						\
		}							\
		usleep_range(50, 100);					\
	}								\
	ret__;								\
})
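/*
 * Typical use (illustration): ret = rk_wait_for(rk_iommu_is_stall_active(iommu), 1);
 * evaluates to 0 once COND becomes true, or -ETIMEDOUT if it never does within MS ms.
 */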

/*
 * The Rockchip rk3288 iommu uses a 2-level page table.
 * The first level is the "Directory Table" (DT).
 * The DT consists of 1024 4-byte Directory Table Entries (DTEs), each pointing
 * to a "Page Table".
 * The second level is the 1024 Page Tables (PT).
 * Each PT consists of 1024 4-byte Page Table Entries (PTEs), each pointing to
 * a 4 KB page of physical memory.
 *
 * The DT and each PT fits in a single 4 KB page (4-bytes * 1024 entries).
 * Each iommu device has a MMU_DTE_ADDR register that contains the physical
 * address of the start of the DT page.
 *
 * The structure of the page table is as follows:
 *
 *                   DT
 * MMU_DTE_ADDR -> +-----+
 *                 |     |
 *                 +-----+     PT
 *                 | DTE | -> +-----+
 *                 +-----+    |     |     Memory
 *                 |     |    +-----+     Page
 *                 |     |    | PTE | -> +-----+
 *                 +-----+    +-----+    |     |
 *                            |     |    |     |
 *                            |     |    |     |
 *                            +-----+    |     |
 *                                       |     |
 *                                       |     |
 *                                       +-----+
 */

/*
 * Each DTE has a PT address and a valid bit:
 * +---------------------+-----------+-+
 * | PT address          | Reserved  |V|
 * +---------------------+-----------+-+
 *  31:12 - PT address (PTs always starts on a 4 KB boundary)
 *  11: 1 - Reserved
 *      0 - 1 if PT @ PT address is valid
 */
#define RK_DTE_PT_ADDRESS_MASK    0xfffff000
#define RK_DTE_PT_VALID           BIT(0)

static inline phys_addr_t rk_dte_pt_address(u32 dte)
{
	return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK;
}

static inline bool rk_dte_is_pt_valid(u32 dte)
{
	return dte & RK_DTE_PT_VALID;
}

static inline u32 rk_mk_dte(dma_addr_t pt_dma)
{
	return (pt_dma & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID;
}

/*
 * Each PTE has a Page address, some flags and a valid bit:
 * +---------------------+---+-------+-+
 * | Page address        |Rsv| Flags |V|
 * +---------------------+---+-------+-+
 *  31:12 - Page address (Pages always start on a 4 KB boundary)
 *  11: 9 - Reserved
 *   8: 1 - Flags
 *      8 - Read allocate - allocate cache space on read misses
 *      7 - Read cache - enable cache & prefetch of data
 *      6 - Write buffer - enable delaying writes on their way to memory
 *      5 - Write allocate - allocate cache space on write misses
 *      4 - Write cache - different writes can be merged together
 *      3 - Override cache attributes
 *          if 1, bits 4-8 control cache attributes
 *          if 0, the system bus defaults are used
 *      2 - Writable
 *      1 - Readable
 *      0 - 1 if Page @ Page address is valid
 */
#define RK_PTE_PAGE_ADDRESS_MASK  0xfffff000
#define RK_PTE_PAGE_FLAGS_MASK    0x000001fe
#define RK_PTE_PAGE_WRITABLE      BIT(2)
#define RK_PTE_PAGE_READABLE      BIT(1)
#define RK_PTE_PAGE_VALID         BIT(0)

static inline phys_addr_t rk_pte_page_address(u32 pte)
{
	return (phys_addr_t)pte & RK_PTE_PAGE_ADDRESS_MASK;
}

static inline bool rk_pte_is_page_valid(u32 pte)
{
	return pte & RK_PTE_PAGE_VALID;
}

/* TODO: set cache flags per prot IOMMU_CACHE */
static u32 rk_mk_pte(phys_addr_t page, int prot)
{
	u32 flags = 0;
	flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0;
	flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;
	page &= RK_PTE_PAGE_ADDRESS_MASK;
	return page | flags | RK_PTE_PAGE_VALID;
}

static u32 rk_mk_pte_invalid(u32 pte)
{
	return pte & ~RK_PTE_PAGE_VALID;
}

/*
 * rk3288 iova (IOMMU Virtual Address) format
 *  31       22.21       12.11          0
 * +-----------+-----------+-------------+
 * | DTE index | PTE index | Page offset |
 * +-----------+-----------+-------------+
 *  31:22 - DTE index   - index of DTE in DT
 *  21:12 - PTE index   - index of PTE in PT @ DTE.pt_address
 *  11: 0 - Page offset - offset into page @ PTE.page_address
 */
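/*
 * Worked example (for illustration): iova 0x12345678 decodes to
 * DTE index 0x048, PTE index 0x345 and page offset 0x678.
 */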
#define RK_IOVA_DTE_MASK    0xffc00000
#define RK_IOVA_DTE_SHIFT   22
#define RK_IOVA_PTE_MASK    0x003ff000
#define RK_IOVA_PTE_SHIFT   12
#define RK_IOVA_PAGE_MASK   0x00000fff
#define RK_IOVA_PAGE_SHIFT  0

static u32 rk_iova_dte_index(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT;
}

static u32 rk_iova_pte_index(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT;
}

static u32 rk_iova_page_offset(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT;
}

static u32 rk_iommu_read(void __iomem *base, u32 offset)
{
	return readl(base + offset);
}

static void rk_iommu_write(void __iomem *base, u32 offset, u32 value)
{
	writel(value, base + offset);
}

static void rk_iommu_command(struct rk_iommu *iommu, u32 command)
{
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		writel(command, iommu->bases[i] + RK_MMU_COMMAND);
}

static void rk_iommu_base_command(void __iomem *base, u32 command)
{
	writel(command, base + RK_MMU_COMMAND);
}

static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova,
			       size_t size)
{
	int i;

	dma_addr_t iova_end = iova + size;
	/*
	 * TODO(djkurtz): Figure out when it is more efficient to shootdown the
	 * entire iotlb rather than iterate over individual iovas.
	 */
	for (i = 0; i < iommu->num_mmu; i++)
		for (; iova < iova_end; iova += SPAGE_SIZE)
			rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova);
}

static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
{
	bool active = true;
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
			     RK_MMU_STATUS_STALL_ACTIVE);

	return active;
}

static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
{
	bool enable = true;
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		enable &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
			     RK_MMU_STATUS_PAGING_ENABLED);

	return enable;
}

static int rk_iommu_enable_stall(struct rk_iommu *iommu)
{
	int ret, i;

	if (rk_iommu_is_stall_active(iommu))
		return 0;

	/* Stall can only be enabled if paging is enabled */
	if (!rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL);

	ret = rk_wait_for(rk_iommu_is_stall_active(iommu), 1);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_disable_stall(struct rk_iommu *iommu)
{
	int ret, i;

	if (!rk_iommu_is_stall_active(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL);

	ret = rk_wait_for(!rk_iommu_is_stall_active(iommu), 1);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_enable_paging(struct rk_iommu *iommu)
{
	int ret, i;

	if (rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING);

	ret = rk_wait_for(rk_iommu_is_paging_enabled(iommu), 1);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_disable_paging(struct rk_iommu *iommu)
{
	int ret, i;

	if (!rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING);

	ret = rk_wait_for(!rk_iommu_is_paging_enabled(iommu), 1);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_force_reset(struct rk_iommu *iommu)
{
	int ret, i;
	u32 dte_addr;

	/*
	 * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY
	 * and verifying that upper 5 nybbles are read back.
	 */
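	/*
	 * (Only bits 31:12 of DTE_ADDR_DUMMY, i.e. 0xCAFEB000, are compared
	 * below, since the check masks with RK_DTE_PT_ADDRESS_MASK.)
	 */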
	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);

		dte_addr = rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR);
		if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) {
			dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
			return -EFAULT;
		}
	}

	rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);

	for (i = 0; i < iommu->num_mmu; i++) {
		ret = rk_wait_for(rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR) == 0x00000000,
				  FORCE_RESET_TIMEOUT);
		if (ret) {
			dev_err(iommu->dev, "FORCE_RESET command timed out\n");
			return ret;
		}
	}

	return 0;
}

static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
{
	void __iomem *base = iommu->bases[index];
	u32 dte_index, pte_index, page_offset;
	u32 mmu_dte_addr;
	phys_addr_t mmu_dte_addr_phys, dte_addr_phys;
	u32 *dte_addr;
	u32 dte;
	phys_addr_t pte_addr_phys = 0;
	u32 *pte_addr = NULL;
	u32 pte = 0;
	phys_addr_t page_addr_phys = 0;
	u32 page_flags = 0;

	dte_index = rk_iova_dte_index(iova);
	pte_index = rk_iova_pte_index(iova);
	page_offset = rk_iova_page_offset(iova);

	mmu_dte_addr = rk_iommu_read(base, RK_MMU_DTE_ADDR);
	mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr;

	dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
	dte_addr = phys_to_virt(dte_addr_phys);
	dte = *dte_addr;

	if (!rk_dte_is_pt_valid(dte))
		goto print_it;

	pte_addr_phys = rk_dte_pt_address(dte) + (pte_index * 4);
	pte_addr = phys_to_virt(pte_addr_phys);
	pte = *pte_addr;

	if (!rk_pte_is_page_valid(pte))
		goto print_it;

	page_addr_phys = rk_pte_page_address(pte) + page_offset;
	page_flags = pte & RK_PTE_PAGE_FLAGS_MASK;

print_it:
	dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n",
		&iova, dte_index, pte_index, page_offset);
	dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n",
		&mmu_dte_addr_phys, &dte_addr_phys, dte,
		rk_dte_is_pt_valid(dte), &pte_addr_phys, pte,
		rk_pte_is_page_valid(pte), &page_addr_phys, page_flags);
}

static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
{
	struct rk_iommu *iommu = dev_id;
	u32 status;
	u32 int_status;
	dma_addr_t iova;
	irqreturn_t ret = IRQ_NONE;
	int i;

	for (i = 0; i < iommu->num_mmu; i++) {
		int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);
		if (int_status == 0)
			continue;

		ret = IRQ_HANDLED;
		iova = rk_iommu_read(iommu->bases[i], RK_MMU_PAGE_FAULT_ADDR);

		if (int_status & RK_MMU_IRQ_PAGE_FAULT) {
			int flags;

			status = rk_iommu_read(iommu->bases[i], RK_MMU_STATUS);
			flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ?
					IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

			dev_err(iommu->dev, "Page fault at %pad of type %s\n",
				&iova,
				(flags == IOMMU_FAULT_WRITE) ? "write" : "read");

			log_iova(iommu, i, iova);

			/*
			 * Report page fault to any installed handlers.
			 * Ignore the return code, though, since we always zap cache
			 * and clear the page fault anyway.
			 */
			if (iommu->domain)
				report_iommu_fault(iommu->domain, iommu->dev, iova,
						   flags);
			else
				dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");

			rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
			rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
		}

		if (int_status & RK_MMU_IRQ_BUS_ERROR)
			dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);

		if (int_status & ~RK_MMU_IRQ_MASK)
			dev_err(iommu->dev, "unexpected int_status: %#08x\n",
				int_status);

		rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status);
	}

	return ret;
}

static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	phys_addr_t pt_phys, phys = 0;
	u32 dte, pte;
	u32 *page_table;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);

	dte = rk_domain->dt[rk_iova_dte_index(iova)];
	if (!rk_dte_is_pt_valid(dte))
		goto out;

	pt_phys = rk_dte_pt_address(dte);
	page_table = (u32 *)phys_to_virt(pt_phys);
	pte = page_table[rk_iova_pte_index(iova)];
	if (!rk_pte_is_page_valid(pte))
		goto out;

	phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova);
out:
	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	return phys;
}

static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
			      dma_addr_t iova, size_t size)
{
	struct list_head *pos;
	unsigned long flags;

	/* shootdown these iova from all iommus using this domain */
	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_for_each(pos, &rk_domain->iommus) {
		struct rk_iommu *iommu;
		iommu = list_entry(pos, struct rk_iommu, node);
		rk_iommu_zap_lines(iommu, iova, size);
	}
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
}

static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain,
					 dma_addr_t iova, size_t size)
{
	rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);
	if (size > SPAGE_SIZE)
		rk_iommu_zap_iova(rk_domain, iova + size - SPAGE_SIZE,
				  SPAGE_SIZE);
}

static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
				  dma_addr_t iova)
{
	struct device *dev = &rk_domain->pdev->dev;
	u32 *page_table, *dte_addr;
	u32 dte_index, dte;
	phys_addr_t pt_phys;
	dma_addr_t pt_dma;

	assert_spin_locked(&rk_domain->dt_lock);

	dte_index = rk_iova_dte_index(iova);
	dte_addr = &rk_domain->dt[dte_index];
	dte = *dte_addr;
	if (rk_dte_is_pt_valid(dte))
		goto done;

	page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
	if (!page_table)
		return ERR_PTR(-ENOMEM);

	pt_dma = dma_map_single(dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pt_dma)) {
		dev_err(dev, "DMA mapping error while allocating page table\n");
		free_page((unsigned long)page_table);
		return ERR_PTR(-ENOMEM);
	}

	dte = rk_mk_dte(pt_dma);
	*dte_addr = dte;

	rk_table_flush(rk_domain, pt_dma, NUM_PT_ENTRIES);
	rk_table_flush(rk_domain,
		       rk_domain->dt_dma + dte_index * sizeof(u32), 1);
done:
	pt_phys = rk_dte_pt_address(dte);
	return (u32 *)phys_to_virt(pt_phys);
}

static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain,
				  u32 *pte_addr, dma_addr_t pte_dma,
				  size_t size)
{
	unsigned int pte_count;
	unsigned int pte_total = size / SPAGE_SIZE;

	assert_spin_locked(&rk_domain->dt_lock);

	for (pte_count = 0; pte_count < pte_total; pte_count++) {
		u32 pte = pte_addr[pte_count];
		if (!rk_pte_is_page_valid(pte))
			break;

		pte_addr[pte_count] = rk_mk_pte_invalid(pte);
	}

	rk_table_flush(rk_domain, pte_dma, pte_count);

	return pte_count * SPAGE_SIZE;
}

static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
			     dma_addr_t pte_dma, dma_addr_t iova,
			     phys_addr_t paddr, size_t size, int prot)
{
	unsigned int pte_count;
	unsigned int pte_total = size / SPAGE_SIZE;
	phys_addr_t page_phys;

	assert_spin_locked(&rk_domain->dt_lock);

	for (pte_count = 0; pte_count < pte_total; pte_count++) {
		u32 pte = pte_addr[pte_count];

		if (rk_pte_is_page_valid(pte))
			goto unwind;

		pte_addr[pte_count] = rk_mk_pte(paddr, prot);

		paddr += SPAGE_SIZE;
	}

	rk_table_flush(rk_domain, pte_dma, pte_total);

	/*
	 * Zap the first and last iova to evict from iotlb any previously
	 * mapped cachelines holding stale values for its dte and pte.
	 * We only zap the first and last iova, since only they could have
	 * dte or pte shared with an existing mapping.
	 */
	rk_iommu_zap_iova_first_last(rk_domain, iova, size);

	return 0;
unwind:
	/* Unmap the range of iovas that we just mapped */
	rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma,
			    pte_count * SPAGE_SIZE);

	iova += pte_count * SPAGE_SIZE;
	page_phys = rk_pte_page_address(pte_addr[pte_count]);
	pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n",
	       &iova, &page_phys, &paddr, prot);

	return -EADDRINUSE;
}

static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
			phys_addr_t paddr, size_t size, int prot)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
	u32 *page_table, *pte_addr;
	u32 dte_index, pte_index;
	int ret;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);

	/*
	 * pgsize_bitmap specifies iova sizes that fit in one page table
	 * (1024 4-KiB pages = 4 MiB).
	 * So, size will always be 4096 <= size <= 4194304.
	 * Since iommu_map() guarantees that both iova and size will be
	 * aligned, we will always only be mapping from a single dte here.
	 */
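	/*
	 * For example (illustrative), a 1 MiB mapping at iova 0x10100000
	 * touches only DTE 0x040 and PTEs 0x100..0x1ff of that page table.
	 */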
	page_table = rk_dte_get_page_table(rk_domain, iova);
	if (IS_ERR(page_table)) {
		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
		return PTR_ERR(page_table);
	}

	dte_index = rk_domain->dt[rk_iova_dte_index(iova)];
	pte_index = rk_iova_pte_index(iova);
	pte_addr = &page_table[pte_index];
	pte_dma = rk_dte_pt_address(dte_index) + pte_index * sizeof(u32);
	ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova,
				paddr, size, prot);

	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	return ret;
}

static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
			     size_t size)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
	phys_addr_t pt_phys;
	u32 dte;
	u32 *pte_addr;
	size_t unmap_size;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);

	/*
	 * pgsize_bitmap specifies iova sizes that fit in one page table
	 * (1024 4-KiB pages = 4 MiB).
	 * So, size will always be 4096 <= size <= 4194304.
	 * Since iommu_unmap() guarantees that both iova and size will be
	 * aligned, we will always only be unmapping from a single dte here.
	 */
	dte = rk_domain->dt[rk_iova_dte_index(iova)];
	/* Just return 0 if iova is unmapped */
	if (!rk_dte_is_pt_valid(dte)) {
		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
		return 0;
	}

	pt_phys = rk_dte_pt_address(dte);
	pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
	pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
	unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size);

	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	/* Shootdown iotlb entries for iova range that was just unmapped */
	rk_iommu_zap_iova(rk_domain, iova, unmap_size);

	return unmap_size;
}

static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
{
	struct iommu_group *group;
	struct device *iommu_dev;
	struct rk_iommu *rk_iommu;

	group = iommu_group_get(dev);
	if (!group)
		return NULL;
	iommu_dev = iommu_group_get_iommudata(group);
	rk_iommu = dev_get_drvdata(iommu_dev);
	iommu_group_put(group);

	return rk_iommu;
}

static int rk_iommu_attach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	struct rk_iommu *iommu;
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	int ret, i;

	/*
	 * Allow 'virtual devices' (e.g., drm) to attach to domain.
	 * Such a device does not belong to an iommu group.
	 */
	iommu = rk_iommu_from_dev(dev);
	if (!iommu)
		return 0;

	ret = rk_iommu_enable_stall(iommu);
	if (ret)
		return ret;

	ret = rk_iommu_force_reset(iommu);
	if (ret)
		return ret;

	iommu->domain = domain;

	for (i = 0; i < iommu->num_irq; i++) {
		ret = devm_request_irq(iommu->dev, iommu->irq[i], rk_iommu_irq,
				       IRQF_SHARED, dev_name(dev), iommu);
		if (ret)
			return ret;
	}

	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
			       rk_domain->dt_dma);
		rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
	}

	ret = rk_iommu_enable_paging(iommu);
	if (ret)
		return ret;

	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_add_tail(&iommu->node, &rk_domain->iommus);
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);

	dev_dbg(dev, "Attached to iommu domain\n");

	rk_iommu_disable_stall(iommu);

	return 0;
}

static void rk_iommu_detach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct rk_iommu *iommu;
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	int i;

	/* Allow 'virtual devices' (e.g., drm) to detach from domain */
	iommu = rk_iommu_from_dev(dev);
	if (!iommu)
		return;

	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_del_init(&iommu->node);
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);

	/* Ignore error while disabling, just keep going */
	rk_iommu_enable_stall(iommu);
	rk_iommu_disable_paging(iommu);
	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
	}
	rk_iommu_disable_stall(iommu);

	for (i = 0; i < iommu->num_irq; i++)
		devm_free_irq(iommu->dev, iommu->irq[i], iommu);

	iommu->domain = NULL;

	dev_dbg(dev, "Detached from iommu domain\n");
}

static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
{
	struct rk_iommu_domain *rk_domain;
	struct platform_device *pdev;
	struct device *iommu_dev;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;

	/*
	 * Register a pdev per domain, so the DMA API can use its *dev
	 * even when a virtual master does not have an iommu slave.
	 */
	pdev = platform_device_register_simple("rk_iommu_domain",
					       PLATFORM_DEVID_AUTO, NULL, 0);
	if (IS_ERR(pdev))
		return NULL;

	rk_domain = devm_kzalloc(&pdev->dev, sizeof(*rk_domain), GFP_KERNEL);
	if (!rk_domain)
		goto err_unreg_pdev;

	rk_domain->pdev = pdev;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&rk_domain->domain))
		goto err_unreg_pdev;

	/*
	 * rk32xx iommus use a 2 level pagetable.
	 * Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
	 * Allocate one 4 KiB page for each table.
	 */
	rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
	if (!rk_domain->dt)
		goto err_put_cookie;

	iommu_dev = &pdev->dev;
	rk_domain->dt_dma = dma_map_single(iommu_dev, rk_domain->dt,
					   SPAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(iommu_dev, rk_domain->dt_dma)) {
		dev_err(iommu_dev, "DMA map error for DT\n");
		goto err_free_dt;
	}

	rk_table_flush(rk_domain, rk_domain->dt_dma, NUM_DT_ENTRIES);

	spin_lock_init(&rk_domain->iommus_lock);
	spin_lock_init(&rk_domain->dt_lock);
	INIT_LIST_HEAD(&rk_domain->iommus);

	rk_domain->domain.geometry.aperture_start = 0;
	rk_domain->domain.geometry.aperture_end = DMA_BIT_MASK(32);
	rk_domain->domain.geometry.force_aperture = true;

	return &rk_domain->domain;

err_free_dt:
	free_page((unsigned long)rk_domain->dt);
err_put_cookie:
	if (type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(&rk_domain->domain);
err_unreg_pdev:
	platform_device_unregister(pdev);

	return NULL;
}

static void rk_iommu_domain_free(struct iommu_domain *domain)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	int i;

	WARN_ON(!list_empty(&rk_domain->iommus));

	for (i = 0; i < NUM_DT_ENTRIES; i++) {
		u32 dte = rk_domain->dt[i];
		if (rk_dte_is_pt_valid(dte)) {
			phys_addr_t pt_phys = rk_dte_pt_address(dte);
			u32 *page_table = phys_to_virt(pt_phys);
			dma_unmap_single(&rk_domain->pdev->dev, pt_phys,
					 SPAGE_SIZE, DMA_TO_DEVICE);
			free_page((unsigned long)page_table);
		}
	}

	dma_unmap_single(&rk_domain->pdev->dev, rk_domain->dt_dma,
			 SPAGE_SIZE, DMA_TO_DEVICE);
	free_page((unsigned long)rk_domain->dt);

	if (domain->type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(&rk_domain->domain);

	platform_device_unregister(rk_domain->pdev);
}

static bool rk_iommu_is_dev_iommu_master(struct device *dev)
{
	struct device_node *np = dev->of_node;
	int ret;

	/*
	 * An iommu master has an iommus property containing a list of phandles
	 * to iommu nodes, each with an #iommu-cells property with value 0.
	 */
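	/*
	 * Illustrative device-tree fragment (node names and addresses are
	 * made up; only the properties named above are relevant here):
	 *
	 *     vop_mmu: iommu@ff930300 {
	 *             compatible = "rockchip,iommu";
	 *             #iommu-cells = <0>;
	 *     };
	 *
	 *     vop: vop@ff930000 {
	 *             iommus = <&vop_mmu>;
	 *     };
	 */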
	ret = of_count_phandle_with_args(np, "iommus", "#iommu-cells");
	return (ret > 0);
}

static int rk_iommu_group_set_iommudata(struct iommu_group *group,
					struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct platform_device *pd;
	int ret;
	struct of_phandle_args args;

	/*
	 * An iommu master has an iommus property containing a list of phandles
	 * to iommu nodes, each with an #iommu-cells property with value 0.
	 */
	ret = of_parse_phandle_with_args(np, "iommus", "#iommu-cells", 0,
					 &args);
	if (ret) {
		dev_err(dev, "of_parse_phandle_with_args(%s) => %d\n",
			np->full_name, ret);
		return ret;
	}
	if (args.args_count != 0) {
		dev_err(dev, "incorrect number of iommu params found for %s (found %d, expected 0)\n",
			args.np->full_name, args.args_count);
		return -EINVAL;
	}

	pd = of_find_device_by_node(args.np);
	of_node_put(args.np);
	if (!pd) {
		dev_err(dev, "iommu %s not found\n", args.np->full_name);
		return -EPROBE_DEFER;
	}

	/* TODO(djkurtz): handle multiple slave iommus for a single master */
	iommu_group_set_iommudata(group, &pd->dev, NULL);

	return 0;
}

static int rk_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;
	struct rk_iommu *iommu;
	int ret;

	if (!rk_iommu_is_dev_iommu_master(dev))
		return -ENODEV;

	group = iommu_group_get(dev);
	if (!group) {
		group = iommu_group_alloc();
		if (IS_ERR(group)) {
			dev_err(dev, "Failed to allocate IOMMU group\n");
			return PTR_ERR(group);
		}
	}

	ret = iommu_group_add_device(group, dev);
	if (ret)
		goto err_put_group;

	ret = rk_iommu_group_set_iommudata(group, dev);
	if (ret)
		goto err_remove_device;

	iommu = rk_iommu_from_dev(dev);
	if (iommu)
		iommu_device_link(&iommu->iommu, dev);

	iommu_group_put(group);

	return 0;

err_remove_device:
	iommu_group_remove_device(dev);
err_put_group:
	iommu_group_put(group);
	return ret;
}

static void rk_iommu_remove_device(struct device *dev)
{
	struct rk_iommu *iommu;

	if (!rk_iommu_is_dev_iommu_master(dev))
		return;

	iommu = rk_iommu_from_dev(dev);
	if (iommu)
		iommu_device_unlink(&iommu->iommu, dev);

	iommu_group_remove_device(dev);
}

static const struct iommu_ops rk_iommu_ops = {
	.domain_alloc = rk_iommu_domain_alloc,
	.domain_free = rk_iommu_domain_free,
	.attach_dev = rk_iommu_attach_device,
	.detach_dev = rk_iommu_detach_device,
	.map = rk_iommu_map,
	.unmap = rk_iommu_unmap,
	.map_sg = default_iommu_map_sg,
	.add_device = rk_iommu_add_device,
	.remove_device = rk_iommu_remove_device,
	.iova_to_phys = rk_iommu_iova_to_phys,
	.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
};

static int rk_iommu_domain_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL);
	if (!dev->dma_parms)
		return -ENOMEM;

	/* Set dma_ops for dev, otherwise it would be dummy_dma_ops */
	arch_setup_dma_ops(dev, 0, DMA_BIT_MASK(32), NULL, false);

	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
	dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));

	return 0;
}

static struct platform_driver rk_iommu_domain_driver = {
	.probe = rk_iommu_domain_probe,
	.driver = {
		   .name = "rk_iommu_domain",
	},
};

static int rk_iommu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rk_iommu *iommu;
	struct resource *res;
	int num_res = pdev->num_resources;
	int err, i;

	iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	platform_set_drvdata(pdev, iommu);
	iommu->dev = dev;
	iommu->num_mmu = 0;

	iommu->bases = devm_kzalloc(dev, sizeof(*iommu->bases) * num_res,
				    GFP_KERNEL);
	if (!iommu->bases)
		return -ENOMEM;

	for (i = 0; i < num_res; i++) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
		if (!res)
			continue;
		iommu->bases[i] = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(iommu->bases[i]))
			continue;
		iommu->num_mmu++;
	}
	if (iommu->num_mmu == 0)
		return PTR_ERR(iommu->bases[0]);

	iommu->num_irq = platform_irq_count(pdev);
	if (iommu->num_irq < 0)
		return iommu->num_irq;
	if (iommu->num_irq == 0)
		return -ENXIO;

	iommu->irq = devm_kcalloc(dev, iommu->num_irq, sizeof(*iommu->irq),
				  GFP_KERNEL);
	if (!iommu->irq)
		return -ENOMEM;

	for (i = 0; i < iommu->num_irq; i++) {
		iommu->irq[i] = platform_get_irq(pdev, i);
		if (iommu->irq[i] < 0) {
			dev_err(dev, "Failed to get IRQ, %d\n", iommu->irq[i]);
			return -ENXIO;
		}
	}

	err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev));
	if (err)
		return err;

	iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops);
	err = iommu_device_register(&iommu->iommu);

	return err;
}

static int rk_iommu_remove(struct platform_device *pdev)
{
	struct rk_iommu *iommu = platform_get_drvdata(pdev);

	if (iommu) {
		iommu_device_sysfs_remove(&iommu->iommu);
		iommu_device_unregister(&iommu->iommu);
	}

	return 0;
}

static const struct of_device_id rk_iommu_dt_ids[] = {
	{ .compatible = "rockchip,iommu" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, rk_iommu_dt_ids);

static struct platform_driver rk_iommu_driver = {
	.probe = rk_iommu_probe,
	.remove = rk_iommu_remove,
	.driver = {
		   .name = "rk_iommu",
		   .of_match_table = rk_iommu_dt_ids,
	},
};

static int __init rk_iommu_init(void)
{
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, rk_iommu_dt_ids);
	if (!np)
		return 0;

	of_node_put(np);

	ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
	if (ret)
		return ret;

	ret = platform_driver_register(&rk_iommu_domain_driver);
	if (ret)
		return ret;

	ret = platform_driver_register(&rk_iommu_driver);
	if (ret)
		platform_driver_unregister(&rk_iommu_domain_driver);
	return ret;
}

static void __exit rk_iommu_exit(void)
{
	platform_driver_unregister(&rk_iommu_driver);
	platform_driver_unregister(&rk_iommu_domain_driver);
}

subsys_initcall(rk_iommu_init);
module_exit(rk_iommu_exit);

MODULE_DESCRIPTION("IOMMU API for Rockchip");
MODULE_AUTHOR("Simon Xue <xxm@rock-chips.com> and Daniel Kurtz <djkurtz@chromium.org>");
MODULE_ALIAS("platform:rockchip-iommu");
MODULE_LICENSE("GPL v2");