/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* MMU register offsets */
#define RK_MMU_DTE_ADDR		0x00	/* Directory table address */
#define RK_MMU_STATUS		0x04
#define RK_MMU_COMMAND		0x08
#define RK_MMU_PAGE_FAULT_ADDR	0x0C	/* IOVA of last page fault */
#define RK_MMU_ZAP_ONE_LINE	0x10	/* Shootdown one IOTLB entry */
#define RK_MMU_INT_RAWSTAT	0x14	/* IRQ status ignoring mask */
#define RK_MMU_INT_CLEAR	0x18	/* Acknowledge and re-arm irq */
#define RK_MMU_INT_MASK		0x1C	/* IRQ enable */
#define RK_MMU_INT_STATUS	0x20	/* IRQ status after masking */
#define RK_MMU_AUTO_GATING	0x24

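/*
 * Programming sequence summary (see rk_iommu_attach_device() below): the
 * driver stalls the MMU, writes the directory table address to
 * RK_MMU_DTE_ADDR, issues RK_MMU_CMD_ZAP_CACHE, unmasks interrupts via
 * RK_MMU_INT_MASK and finally enables paging.
 */
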
#define DTE_ADDR_DUMMY			0xCAFEBABE

#define RK_MMU_POLL_PERIOD_US		100
#define RK_MMU_FORCE_RESET_TIMEOUT_US	100000
#define RK_MMU_POLL_TIMEOUT_US		1000

/* RK_MMU_STATUS fields */
#define RK_MMU_STATUS_PAGING_ENABLED		BIT(0)
#define RK_MMU_STATUS_PAGE_FAULT_ACTIVE		BIT(1)
#define RK_MMU_STATUS_STALL_ACTIVE		BIT(2)
#define RK_MMU_STATUS_IDLE			BIT(3)
#define RK_MMU_STATUS_REPLAY_BUFFER_EMPTY	BIT(4)
#define RK_MMU_STATUS_PAGE_FAULT_IS_WRITE	BIT(5)
#define RK_MMU_STATUS_STALL_NOT_ACTIVE		BIT(31)

/* RK_MMU_COMMAND command values */
#define RK_MMU_CMD_ENABLE_PAGING	0 /* Enable memory translation */
#define RK_MMU_CMD_DISABLE_PAGING	1 /* Disable memory translation */
#define RK_MMU_CMD_ENABLE_STALL		2 /* Stall paging to allow other cmds */
#define RK_MMU_CMD_DISABLE_STALL	3 /* Stop stall re-enables paging */
#define RK_MMU_CMD_ZAP_CACHE		4 /* Shoot down entire IOTLB */
#define RK_MMU_CMD_PAGE_FAULT_DONE	5 /* Clear page fault */
#define RK_MMU_CMD_FORCE_RESET		6 /* Reset all registers */

/* RK_MMU_INT_* register fields */
#define RK_MMU_IRQ_PAGE_FAULT	0x01 /* page fault */
#define RK_MMU_IRQ_BUS_ERROR	0x02 /* bus read error */
#define RK_MMU_IRQ_MASK		(RK_MMU_IRQ_PAGE_FAULT | RK_MMU_IRQ_BUS_ERROR)

#define NUM_DT_ENTRIES 1024
#define NUM_PT_ENTRIES 1024

#define SPAGE_ORDER 12
#define SPAGE_SIZE (1 << SPAGE_ORDER)

/*
 * Support mapping any size that fits in one page table:
 * 4 KiB to 4 MiB
 */
#define RK_IOMMU_PGSIZE_BITMAP 0x007ff000

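/*
 * Each set bit k in the bitmap advertises support for mappings of 2^k bytes;
 * 0x007ff000 has bits 12..22 set, i.e. every power-of-two size from 4 KiB up
 * to 4 MiB (one full page table).
 */
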
struct rk_iommu_domain {
	struct list_head iommus;
	struct platform_device *pdev;
	u32 *dt; /* page directory table */
	dma_addr_t dt_dma;
	spinlock_t iommus_lock; /* lock for iommus list */
	spinlock_t dt_lock; /* lock for modifying page directory table */

	struct iommu_domain domain;
};

/* list of clocks required by IOMMU */
static const char * const rk_iommu_clocks[] = {
	"aclk", "iface",
};

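/*
 * An IOMMU node is therefore expected to provide both clocks, roughly along
 * the lines of this illustrative (not board-accurate) device tree fragment;
 * the address, interrupt and clock ids below are placeholders:
 *
 *	iommu@ff930300 {
 *		compatible = "rockchip,iommu";
 *		reg = <0x0 0xff930300 0x0 0x100>;
 *		interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
 *		clocks = <&cru ACLK_EXAMPLE>, <&cru HCLK_EXAMPLE>;
 *		clock-names = "aclk", "iface";
 *		#iommu-cells = <0>;
 *	};
 */
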
struct rk_iommu {
	struct device *dev;
	void __iomem **bases;
	int num_mmu;
	struct clk_bulk_data *clocks;
	int num_clocks;
	bool reset_disabled;
	struct iommu_device iommu;
	struct list_head node; /* entry in rk_iommu_domain.iommus */
	struct iommu_domain *domain; /* domain to which iommu is attached */
};

static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma,
				  unsigned int count)
{
	size_t size = count * sizeof(u32); /* count of u32 entry */

	dma_sync_single_for_device(&dom->pdev->dev, dma, size, DMA_TO_DEVICE);
}

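/*
 * The CPU updates DT/PT entries through the kernel linear mapping while the
 * MMU fetches them from memory via their DMA address, so every table write
 * below is followed by a rk_table_flush(); on non-coherent systems this is
 * effectively a cache clean of the touched entries.
 */
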
static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct rk_iommu_domain, domain);
}

/*
 * The Rockchip rk3288 iommu uses a 2-level page table.
 * The first level is the "Directory Table" (DT).
 * The DT consists of 1024 4-byte Directory Table Entries (DTEs), each pointing
 * to a "Page Table".
 * The second level is the 1024 Page Tables (PT).
 * Each PT consists of 1024 4-byte Page Table Entries (PTEs), each pointing to
 * a 4 KB page of physical memory.
 *
 * The DT and each PT fits in a single 4 KB page (4-bytes * 1024 entries).
 * Each iommu device has a MMU_DTE_ADDR register that contains the physical
 * address of the start of the DT page.
 *
 * The structure of the page table is as follows:
 *
 *                   DT
 * MMU_DTE_ADDR -> +-----+
 *                 |     |
 *                 +-----+     PT
 *                 | DTE | -> +-----+
 *                 +-----+    |     |     Memory
 *                 |     |    +-----+     Page
 *                 |     |    | PTE | -> +-----+
 *                 +-----+    +-----+    |     |
 *                            |     |    |     |
 *                            |     |    |     |
 *                            +-----+    |     |
 *                                       |     |
 *                                       |     |
 *                                       +-----+
 */

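/*
 * In total: 1024 DTEs x 1024 PTEs x 4 KiB pages = 4 GiB of addressable IOVA
 * space, matching the 32-bit aperture set up in rk_iommu_domain_alloc().
 */
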
/*
 * Each DTE has a PT address and a valid bit:
 * +---------------------+-----------+-+
 * | PT address          | Reserved  |V|
 * +---------------------+-----------+-+
 *  31:12 - PT address (PTs always starts on a 4 KB boundary)
 *  11: 1 - Reserved
 *      0 - 1 if PT @ PT address is valid
 */
#define RK_DTE_PT_ADDRESS_MASK    0xfffff000
#define RK_DTE_PT_VALID           BIT(0)

static inline phys_addr_t rk_dte_pt_address(u32 dte)
{
	return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK;
}

static inline bool rk_dte_is_pt_valid(u32 dte)
{
	return dte & RK_DTE_PT_VALID;
}

static inline u32 rk_mk_dte(dma_addr_t pt_dma)
{
	return (pt_dma & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID;
}

/*
 * Each PTE has a Page address, some flags and a valid bit:
 * +---------------------+---+-------+-+
 * | Page address        |Rsv| Flags |V|
 * +---------------------+---+-------+-+
 *  31:12 - Page address (Pages always start on a 4 KB boundary)
 *  11: 9 - Reserved
 *   8: 1 - Flags
 *      8 - Read allocate - allocate cache space on read misses
 *      7 - Read cache - enable cache & prefetch of data
 *      6 - Write buffer - enable delaying writes on their way to memory
 *      5 - Write allocate - allocate cache space on write misses
 *      4 - Write cache - different writes can be merged together
 *      3 - Override cache attributes
 *          if 1, bits 4-8 control cache attributes
 *          if 0, the system bus defaults are used
 *      2 - Writable
 *      1 - Readable
 *      0 - 1 if Page @ Page address is valid
 */
#define RK_PTE_PAGE_ADDRESS_MASK  0xfffff000
#define RK_PTE_PAGE_FLAGS_MASK    0x000001fe
#define RK_PTE_PAGE_WRITABLE      BIT(2)
#define RK_PTE_PAGE_READABLE      BIT(1)
#define RK_PTE_PAGE_VALID         BIT(0)

static inline phys_addr_t rk_pte_page_address(u32 pte)
{
	return (phys_addr_t)pte & RK_PTE_PAGE_ADDRESS_MASK;
}

static inline bool rk_pte_is_page_valid(u32 pte)
{
	return pte & RK_PTE_PAGE_VALID;
}

/* TODO: set cache flags per prot IOMMU_CACHE */
static u32 rk_mk_pte(phys_addr_t page, int prot)
{
	u32 flags = 0;
	flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0;
	flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;
	page &= RK_PTE_PAGE_ADDRESS_MASK;
	return page | flags | RK_PTE_PAGE_VALID;
}

static u32 rk_mk_pte_invalid(u32 pte)
{
	return pte & ~RK_PTE_PAGE_VALID;
}

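/*
 * Worked example: rk_mk_pte(0x2f800000, IOMMU_READ | IOMMU_WRITE), with an
 * arbitrary page address, yields 0x2f800007: page address | readable BIT(1) |
 * writable BIT(2) | valid BIT(0). rk_mk_pte_invalid() only clears BIT(0); the
 * remaining bits are left in place.
 */
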
/*
 * rk3288 iova (IOMMU Virtual Address) format
 *  31       22.21       12.11          0
 * +-----------+-----------+-------------+
 * | DTE index | PTE index | Page offset |
 * +-----------+-----------+-------------+
 *  31:22 - DTE index   - index of DTE in DT
 *  21:12 - PTE index   - index of PTE in PT @ DTE.pt_address
 *  11: 0 - Page offset - offset into page @ PTE.page_address
 */
#define RK_IOVA_DTE_MASK    0xffc00000
#define RK_IOVA_DTE_SHIFT   22
#define RK_IOVA_PTE_MASK    0x003ff000
#define RK_IOVA_PTE_SHIFT   12
#define RK_IOVA_PAGE_MASK   0x00000fff
#define RK_IOVA_PAGE_SHIFT  0

static u32 rk_iova_dte_index(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT;
}

static u32 rk_iova_pte_index(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT;
}

static u32 rk_iova_page_offset(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT;
}

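/*
 * Worked example: iova 0x12345678 decomposes into dte_index 0x048, pte_index
 * 0x345 and page_offset 0x678, i.e. DTE 72 of the DT and PTE 837 of the page
 * table that DTE points to.
 */
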
static u32 rk_iommu_read(void __iomem *base, u32 offset)
{
	return readl(base + offset);
}

static void rk_iommu_write(void __iomem *base, u32 offset, u32 value)
{
	writel(value, base + offset);
}

static void rk_iommu_command(struct rk_iommu *iommu, u32 command)
{
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		writel(command, iommu->bases[i] + RK_MMU_COMMAND);
}

static void rk_iommu_base_command(void __iomem *base, u32 command)
{
	writel(command, base + RK_MMU_COMMAND);
}

static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova_start,
			       size_t size)
{
	int i;
	dma_addr_t iova_end = iova_start + size;
	/*
	 * TODO(djkurtz): Figure out when it is more efficient to shootdown the
	 * entire iotlb rather than iterate over individual iovas.
	 */
	for (i = 0; i < iommu->num_mmu; i++) {
		dma_addr_t iova;

		for (iova = iova_start; iova < iova_end; iova += SPAGE_SIZE)
			rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova);
	}
}

static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
{
	bool active = true;
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
					   RK_MMU_STATUS_STALL_ACTIVE);

	return active;
}

static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
{
	bool enable = true;
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		enable &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
					   RK_MMU_STATUS_PAGING_ENABLED);

	return enable;
}

static bool rk_iommu_is_reset_done(struct rk_iommu *iommu)
{
	bool done = true;
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		done &= rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR) == 0;

	return done;
}

static int rk_iommu_enable_stall(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (rk_iommu_is_stall_active(iommu))
		return 0;

	/* Stall can only be enabled if paging is enabled */
	if (!rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL);

	ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
				 val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_disable_stall(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (!rk_iommu_is_stall_active(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL);

	ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
				 !val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_enable_paging(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING);

	ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
				 val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_disable_paging(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (!rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING);

	ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
				 !val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_force_reset(struct rk_iommu *iommu)
{
	int ret, i;
	u32 dte_addr;
	bool val;

	if (iommu->reset_disabled)
		return 0;

	/*
	 * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY
	 * and verifying that upper 5 nybbles are read back.
	 */
	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);

		dte_addr = rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR);
		if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) {
			dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
			return -EFAULT;
		}
	}

	rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);

	ret = readx_poll_timeout(rk_iommu_is_reset_done, iommu, val,
				 val, RK_MMU_FORCE_RESET_TIMEOUT_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret) {
		dev_err(iommu->dev, "FORCE_RESET command timed out\n");
		return ret;
	}

	return 0;
}

static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
{
	void __iomem *base = iommu->bases[index];
	u32 dte_index, pte_index, page_offset;
	u32 mmu_dte_addr;
	phys_addr_t mmu_dte_addr_phys, dte_addr_phys;
	u32 *dte_addr;
	u32 dte;
	phys_addr_t pte_addr_phys = 0;
	u32 *pte_addr = NULL;
	u32 pte = 0;
	phys_addr_t page_addr_phys = 0;
	u32 page_flags = 0;

	dte_index = rk_iova_dte_index(iova);
	pte_index = rk_iova_pte_index(iova);
	page_offset = rk_iova_page_offset(iova);

	mmu_dte_addr = rk_iommu_read(base, RK_MMU_DTE_ADDR);
	mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr;

	dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
	dte_addr = phys_to_virt(dte_addr_phys);
	dte = *dte_addr;

	if (!rk_dte_is_pt_valid(dte))
		goto print_it;

	pte_addr_phys = rk_dte_pt_address(dte) + (pte_index * 4);
	pte_addr = phys_to_virt(pte_addr_phys);
	pte = *pte_addr;

	if (!rk_pte_is_page_valid(pte))
		goto print_it;

	page_addr_phys = rk_pte_page_address(pte) + page_offset;
	page_flags = pte & RK_PTE_PAGE_FLAGS_MASK;

print_it:
	dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n",
		&iova, dte_index, pte_index, page_offset);
	dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n",
		&mmu_dte_addr_phys, &dte_addr_phys, dte,
		rk_dte_is_pt_valid(dte), &pte_addr_phys, pte,
		rk_pte_is_page_valid(pte), &page_addr_phys, page_flags);
}

static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
{
	struct rk_iommu *iommu = dev_id;
	u32 status;
	u32 int_status;
	dma_addr_t iova;
	irqreturn_t ret = IRQ_NONE;
	int i;

	WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));

	for (i = 0; i < iommu->num_mmu; i++) {
		int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);
		if (int_status == 0)
			continue;

		ret = IRQ_HANDLED;
		iova = rk_iommu_read(iommu->bases[i], RK_MMU_PAGE_FAULT_ADDR);

		if (int_status & RK_MMU_IRQ_PAGE_FAULT) {
			int flags;

			status = rk_iommu_read(iommu->bases[i], RK_MMU_STATUS);
			flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ?
					IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

			dev_err(iommu->dev, "Page fault at %pad of type %s\n",
				&iova,
				(flags == IOMMU_FAULT_WRITE) ? "write" : "read");

			log_iova(iommu, i, iova);

			/*
			 * Report page fault to any installed handlers.
			 * Ignore the return code, though, since we always zap cache
			 * and clear the page fault anyway.
			 */
			if (iommu->domain)
				report_iommu_fault(iommu->domain, iommu->dev, iova,
						   flags);
			else
				dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");

			rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
			rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
		}

		if (int_status & RK_MMU_IRQ_BUS_ERROR)
			dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);

		if (int_status & ~RK_MMU_IRQ_MASK)
			dev_err(iommu->dev, "unexpected int_status: %#08x\n",
				int_status);

		rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status);
	}

	clk_bulk_disable(iommu->num_clocks, iommu->clocks);

	return ret;
}

static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	phys_addr_t pt_phys, phys = 0;
	u32 dte, pte;
	u32 *page_table;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);

	dte = rk_domain->dt[rk_iova_dte_index(iova)];
	if (!rk_dte_is_pt_valid(dte))
		goto out;

	pt_phys = rk_dte_pt_address(dte);
	page_table = (u32 *)phys_to_virt(pt_phys);
	pte = page_table[rk_iova_pte_index(iova)];
	if (!rk_pte_is_page_valid(pte))
		goto out;

	phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova);
out:
	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	return phys;
}

static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
			      dma_addr_t iova, size_t size)
{
	struct list_head *pos;
	unsigned long flags;

	/* shootdown these iova from all iommus using this domain */
	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_for_each(pos, &rk_domain->iommus) {
		struct rk_iommu *iommu;
		iommu = list_entry(pos, struct rk_iommu, node);
		WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
		rk_iommu_zap_lines(iommu, iova, size);
		clk_bulk_disable(iommu->num_clocks, iommu->clocks);
	}
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
}

static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain,
					 dma_addr_t iova, size_t size)
{
	rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);
	if (size > SPAGE_SIZE)
		rk_iommu_zap_iova(rk_domain, iova + size - SPAGE_SIZE,
				  SPAGE_SIZE);
}

static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
				  dma_addr_t iova)
{
	struct device *dev = &rk_domain->pdev->dev;
	u32 *page_table, *dte_addr;
	u32 dte_index, dte;
	phys_addr_t pt_phys;
	dma_addr_t pt_dma;

	assert_spin_locked(&rk_domain->dt_lock);

	dte_index = rk_iova_dte_index(iova);
	dte_addr = &rk_domain->dt[dte_index];
	dte = *dte_addr;
	if (rk_dte_is_pt_valid(dte))
		goto done;

	page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
	if (!page_table)
		return ERR_PTR(-ENOMEM);

	pt_dma = dma_map_single(dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pt_dma)) {
		dev_err(dev, "DMA mapping error while allocating page table\n");
		free_page((unsigned long)page_table);
		return ERR_PTR(-ENOMEM);
	}

	dte = rk_mk_dte(pt_dma);
	*dte_addr = dte;

	rk_table_flush(rk_domain, pt_dma, NUM_PT_ENTRIES);
	rk_table_flush(rk_domain,
		       rk_domain->dt_dma + dte_index * sizeof(u32), 1);
done:
	pt_phys = rk_dte_pt_address(dte);
	return (u32 *)phys_to_virt(pt_phys);
}

static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain,
				  u32 *pte_addr, dma_addr_t pte_dma,
				  size_t size)
{
	unsigned int pte_count;
	unsigned int pte_total = size / SPAGE_SIZE;

	assert_spin_locked(&rk_domain->dt_lock);

	for (pte_count = 0; pte_count < pte_total; pte_count++) {
		u32 pte = pte_addr[pte_count];
		if (!rk_pte_is_page_valid(pte))
			break;

		pte_addr[pte_count] = rk_mk_pte_invalid(pte);
	}

	rk_table_flush(rk_domain, pte_dma, pte_count);

	return pte_count * SPAGE_SIZE;
}

static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
			     dma_addr_t pte_dma, dma_addr_t iova,
			     phys_addr_t paddr, size_t size, int prot)
{
	unsigned int pte_count;
	unsigned int pte_total = size / SPAGE_SIZE;
	phys_addr_t page_phys;

	assert_spin_locked(&rk_domain->dt_lock);

	for (pte_count = 0; pte_count < pte_total; pte_count++) {
		u32 pte = pte_addr[pte_count];

		if (rk_pte_is_page_valid(pte))
			goto unwind;

		pte_addr[pte_count] = rk_mk_pte(paddr, prot);

		paddr += SPAGE_SIZE;
	}

	rk_table_flush(rk_domain, pte_dma, pte_total);

	/*
	 * Zap the first and last iova to evict from iotlb any previously
	 * mapped cachelines holding stale values for its dte and pte.
	 * We only zap the first and last iova, since only they could have
	 * dte or pte shared with an existing mapping.
	 */
	rk_iommu_zap_iova_first_last(rk_domain, iova, size);

	return 0;
unwind:
	/* Unmap the range of iovas that we just mapped */
	rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma,
			    pte_count * SPAGE_SIZE);

	iova += pte_count * SPAGE_SIZE;
	page_phys = rk_pte_page_address(pte_addr[pte_count]);
	pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n",
	       &iova, &page_phys, &paddr, prot);

	return -EADDRINUSE;
}

static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
			phys_addr_t paddr, size_t size, int prot)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
	u32 *page_table, *pte_addr;
	u32 dte_index, pte_index;
	int ret;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);

	/*
	 * pgsize_bitmap specifies iova sizes that fit in one page table
	 * (1024 4-KiB pages = 4 MiB).
	 * So, size will always be 4096 <= size <= 4194304.
	 * Since iommu_map() guarantees that both iova and size will be
	 * aligned, we will always only be mapping from a single dte here.
	 */
	page_table = rk_dte_get_page_table(rk_domain, iova);
	if (IS_ERR(page_table)) {
		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
		return PTR_ERR(page_table);
	}

	dte_index = rk_domain->dt[rk_iova_dte_index(iova)];
	pte_index = rk_iova_pte_index(iova);
	pte_addr = &page_table[pte_index];
	pte_dma = rk_dte_pt_address(dte_index) + pte_index * sizeof(u32);
	ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova,
				paddr, size, prot);

	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	return ret;
}

static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
			     size_t size)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
	phys_addr_t pt_phys;
	u32 dte;
	u32 *pte_addr;
	size_t unmap_size;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);

	/*
	 * pgsize_bitmap specifies iova sizes that fit in one page table
	 * (1024 4-KiB pages = 4 MiB).
	 * So, size will always be 4096 <= size <= 4194304.
	 * Since iommu_unmap() guarantees that both iova and size will be
	 * aligned, we will always only be unmapping from a single dte here.
	 */
	dte = rk_domain->dt[rk_iova_dte_index(iova)];
	/* Just return 0 if iova is unmapped */
	if (!rk_dte_is_pt_valid(dte)) {
		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
		return 0;
	}

	pt_phys = rk_dte_pt_address(dte);
	pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
	pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
	unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size);

	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	/* Shootdown iotlb entries for iova range that was just unmapped */
	rk_iommu_zap_iova(rk_domain, iova, unmap_size);

	return unmap_size;
}

static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
{
	struct iommu_group *group;
	struct device *iommu_dev;
	struct rk_iommu *rk_iommu;

	group = iommu_group_get(dev);
	if (!group)
		return NULL;
	iommu_dev = iommu_group_get_iommudata(group);
	rk_iommu = dev_get_drvdata(iommu_dev);
	iommu_group_put(group);

	return rk_iommu;
}

static int rk_iommu_attach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	struct rk_iommu *iommu;
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	int ret, i;

	/*
	 * Allow 'virtual devices' (e.g., drm) to attach to domain.
	 * Such a device does not belong to an iommu group.
	 */
	iommu = rk_iommu_from_dev(dev);
	if (!iommu)
		return 0;

	ret = clk_bulk_enable(iommu->num_clocks, iommu->clocks);
	if (ret)
		return ret;

	ret = rk_iommu_enable_stall(iommu);
	if (ret)
		goto out_disable_clocks;

	ret = rk_iommu_force_reset(iommu);
	if (ret)
		goto out_disable_stall;

	iommu->domain = domain;

	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
			       rk_domain->dt_dma);
		rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
	}

	ret = rk_iommu_enable_paging(iommu);
	if (ret)
		goto out_disable_stall;

	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_add_tail(&iommu->node, &rk_domain->iommus);
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);

	dev_dbg(dev, "Attached to iommu domain\n");

out_disable_stall:
	rk_iommu_disable_stall(iommu);
out_disable_clocks:
	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
	return ret;
}

static void rk_iommu_detach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct rk_iommu *iommu;
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	int i;

	/* Allow 'virtual devices' (e.g., drm) to detach from domain */
	iommu = rk_iommu_from_dev(dev);
	if (!iommu)
		return;

	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_del_init(&iommu->node);
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);

	/* Ignore errors while disabling, just keep going */
	WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
	rk_iommu_enable_stall(iommu);
	rk_iommu_disable_paging(iommu);
	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
	}
	rk_iommu_disable_stall(iommu);
	clk_bulk_disable(iommu->num_clocks, iommu->clocks);

	iommu->domain = NULL;

	dev_dbg(dev, "Detached from iommu domain\n");
}

static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
{
	struct rk_iommu_domain *rk_domain;
	struct platform_device *pdev;
	struct device *iommu_dev;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;

	/*
	 * Register a pdev per domain, so the DMA API can base on this *dev
	 * even when a virtual master doesn't have an iommu slave.
	 */
	pdev = platform_device_register_simple("rk_iommu_domain",
					       PLATFORM_DEVID_AUTO, NULL, 0);
	if (IS_ERR(pdev))
		return NULL;

	rk_domain = devm_kzalloc(&pdev->dev, sizeof(*rk_domain), GFP_KERNEL);
	if (!rk_domain)
		goto err_unreg_pdev;

	rk_domain->pdev = pdev;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&rk_domain->domain))
		goto err_unreg_pdev;

	/*
	 * rk32xx iommus use a 2 level pagetable.
	 * Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
	 * Allocate one 4 KiB page for each table.
	 */
	rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
	if (!rk_domain->dt)
		goto err_put_cookie;

	iommu_dev = &pdev->dev;
	rk_domain->dt_dma = dma_map_single(iommu_dev, rk_domain->dt,
					   SPAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(iommu_dev, rk_domain->dt_dma)) {
		dev_err(iommu_dev, "DMA map error for DT\n");
		goto err_free_dt;
	}

	rk_table_flush(rk_domain, rk_domain->dt_dma, NUM_DT_ENTRIES);

	spin_lock_init(&rk_domain->iommus_lock);
	spin_lock_init(&rk_domain->dt_lock);
	INIT_LIST_HEAD(&rk_domain->iommus);

	rk_domain->domain.geometry.aperture_start = 0;
	rk_domain->domain.geometry.aperture_end   = DMA_BIT_MASK(32);
	rk_domain->domain.geometry.force_aperture = true;

	return &rk_domain->domain;

err_free_dt:
	free_page((unsigned long)rk_domain->dt);
err_put_cookie:
	if (type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(&rk_domain->domain);
err_unreg_pdev:
	platform_device_unregister(pdev);

	return NULL;
}

static void rk_iommu_domain_free(struct iommu_domain *domain)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	int i;

	WARN_ON(!list_empty(&rk_domain->iommus));

	for (i = 0; i < NUM_DT_ENTRIES; i++) {
		u32 dte = rk_domain->dt[i];
		if (rk_dte_is_pt_valid(dte)) {
			phys_addr_t pt_phys = rk_dte_pt_address(dte);
			u32 *page_table = phys_to_virt(pt_phys);
			dma_unmap_single(&rk_domain->pdev->dev, pt_phys,
					 SPAGE_SIZE, DMA_TO_DEVICE);
			free_page((unsigned long)page_table);
		}
	}

	dma_unmap_single(&rk_domain->pdev->dev, rk_domain->dt_dma,
			 SPAGE_SIZE, DMA_TO_DEVICE);
	free_page((unsigned long)rk_domain->dt);

	if (domain->type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(&rk_domain->domain);

	platform_device_unregister(rk_domain->pdev);
}

static bool rk_iommu_is_dev_iommu_master(struct device *dev)
{
	struct device_node *np = dev->of_node;
	int ret;

	/*
	 * An iommu master has an iommus property containing a list of phandles
	 * to iommu nodes, each with an #iommu-cells property with value 0.
	 */
	ret = of_count_phandle_with_args(np, "iommus", "#iommu-cells");
	return (ret > 0);
}

static int rk_iommu_group_set_iommudata(struct iommu_group *group,
					struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct platform_device *pd;
	int ret;
	struct of_phandle_args args;

	/*
	 * An iommu master has an iommus property containing a list of phandles
	 * to iommu nodes, each with an #iommu-cells property with value 0.
	 */
	ret = of_parse_phandle_with_args(np, "iommus", "#iommu-cells", 0,
					 &args);
	if (ret) {
		dev_err(dev, "of_parse_phandle_with_args(%pOF) => %d\n",
			np, ret);
		return ret;
	}
	if (args.args_count != 0) {
		dev_err(dev, "incorrect number of iommu params found for %pOF (found %d, expected 0)\n",
			args.np, args.args_count);
		return -EINVAL;
	}

	pd = of_find_device_by_node(args.np);
	of_node_put(args.np);
	if (!pd) {
		dev_err(dev, "iommu %pOF not found\n", args.np);
		return -EPROBE_DEFER;
	}

	/* TODO(djkurtz): handle multiple slave iommus for a single master */
	iommu_group_set_iommudata(group, &pd->dev, NULL);

	return 0;
}

static int rk_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;
	struct rk_iommu *iommu;
	int ret;

	if (!rk_iommu_is_dev_iommu_master(dev))
		return -ENODEV;

	group = iommu_group_get(dev);
	if (!group) {
		group = iommu_group_alloc();
		if (IS_ERR(group)) {
			dev_err(dev, "Failed to allocate IOMMU group\n");
			return PTR_ERR(group);
		}
	}

	ret = iommu_group_add_device(group, dev);
	if (ret)
		goto err_put_group;

	ret = rk_iommu_group_set_iommudata(group, dev);
	if (ret)
		goto err_remove_device;

	iommu = rk_iommu_from_dev(dev);
	if (iommu)
		iommu_device_link(&iommu->iommu, dev);

	iommu_group_put(group);

	return 0;

err_remove_device:
	iommu_group_remove_device(dev);
err_put_group:
	iommu_group_put(group);
	return ret;
}

static void rk_iommu_remove_device(struct device *dev)
{
	struct rk_iommu *iommu;

	if (!rk_iommu_is_dev_iommu_master(dev))
		return;

	iommu = rk_iommu_from_dev(dev);
	if (iommu)
		iommu_device_unlink(&iommu->iommu, dev);

	iommu_group_remove_device(dev);
}

static const struct iommu_ops rk_iommu_ops = {
	.domain_alloc = rk_iommu_domain_alloc,
	.domain_free = rk_iommu_domain_free,
	.attach_dev = rk_iommu_attach_device,
	.detach_dev = rk_iommu_detach_device,
	.map = rk_iommu_map,
	.unmap = rk_iommu_unmap,
	.map_sg = default_iommu_map_sg,
	.add_device = rk_iommu_add_device,
	.remove_device = rk_iommu_remove_device,
	.iova_to_phys = rk_iommu_iova_to_phys,
	.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
};

static int rk_iommu_domain_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL);
	if (!dev->dma_parms)
		return -ENOMEM;

	/* Set dma_ops for dev, otherwise it would be dummy_dma_ops */
	arch_setup_dma_ops(dev, 0, DMA_BIT_MASK(32), NULL, false);

	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
	dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));

	return 0;
}

static struct platform_driver rk_iommu_domain_driver = {
	.probe = rk_iommu_domain_probe,
	.driver = {
		   .name = "rk_iommu_domain",
	},
};

static int rk_iommu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rk_iommu *iommu;
	struct resource *res;
	int num_res = pdev->num_resources;
	int err, i, irq;

	iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	platform_set_drvdata(pdev, iommu);
	iommu->dev = dev;
	iommu->num_mmu = 0;

	iommu->bases = devm_kzalloc(dev, sizeof(*iommu->bases) * num_res,
				    GFP_KERNEL);
	if (!iommu->bases)
		return -ENOMEM;

	for (i = 0; i < num_res; i++) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
		if (!res)
			continue;
		iommu->bases[i] = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(iommu->bases[i]))
			continue;
		iommu->num_mmu++;
	}
	if (iommu->num_mmu == 0)
		return PTR_ERR(iommu->bases[0]);

	i = 0;
	while ((irq = platform_get_irq(pdev, i++)) != -ENXIO) {
		if (irq < 0)
			return irq;

		err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
				       IRQF_SHARED, dev_name(dev), iommu);
		if (err)
			return err;
	}

	iommu->reset_disabled = device_property_read_bool(dev,
					"rockchip,disable-mmu-reset");

	iommu->num_clocks = ARRAY_SIZE(rk_iommu_clocks);
	iommu->clocks = devm_kcalloc(iommu->dev, iommu->num_clocks,
				     sizeof(*iommu->clocks), GFP_KERNEL);
	if (!iommu->clocks)
		return -ENOMEM;

	for (i = 0; i < iommu->num_clocks; ++i)
		iommu->clocks[i].id = rk_iommu_clocks[i];

	err = devm_clk_bulk_get(iommu->dev, iommu->num_clocks, iommu->clocks);
	if (err)
		return err;

	err = clk_bulk_prepare(iommu->num_clocks, iommu->clocks);
	if (err)
		return err;

	err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev));
	if (err)
		goto err_unprepare_clocks;

	iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops);
	err = iommu_device_register(&iommu->iommu);
	if (err)
		goto err_remove_sysfs;

	return 0;
err_remove_sysfs:
	iommu_device_sysfs_remove(&iommu->iommu);
err_unprepare_clocks:
	clk_bulk_unprepare(iommu->num_clocks, iommu->clocks);
	return err;
}

static void rk_iommu_shutdown(struct platform_device *pdev)
{
	struct rk_iommu *iommu = platform_get_drvdata(pdev);

	/*
	 * Be careful not to try to shutdown an otherwise unused
	 * IOMMU, as it is likely not to be clocked, and accessing it
	 * would just block. An IOMMU without a domain is likely to be
	 * unused, so let's use this as a (weak) guard.
	 */
	if (iommu && iommu->domain) {
		rk_iommu_enable_stall(iommu);
		rk_iommu_disable_paging(iommu);
		rk_iommu_force_reset(iommu);
	}
}

static const struct of_device_id rk_iommu_dt_ids[] = {
	{ .compatible = "rockchip,iommu" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, rk_iommu_dt_ids);

static struct platform_driver rk_iommu_driver = {
	.probe = rk_iommu_probe,
	.shutdown = rk_iommu_shutdown,
	.driver = {
		   .name = "rk_iommu",
		   .of_match_table = rk_iommu_dt_ids,
		   .suppress_bind_attrs = true,
	},
};

static int __init rk_iommu_init(void)
{
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, rk_iommu_dt_ids);
	if (!np)
		return 0;

	of_node_put(np);

	ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
	if (ret)
		return ret;

	ret = platform_driver_register(&rk_iommu_domain_driver);
	if (ret)
		return ret;

	ret = platform_driver_register(&rk_iommu_driver);
	if (ret)
		platform_driver_unregister(&rk_iommu_domain_driver);
	return ret;
}
subsys_initcall(rk_iommu_init);

MODULE_DESCRIPTION("IOMMU API for Rockchip");
MODULE_AUTHOR("Simon Xue <xxm@rock-chips.com> and Daniel Kurtz <djkurtz@chromium.org>");
MODULE_ALIAS("platform:rockchip-iommu");
MODULE_LICENSE("GPL v2");