// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for Rockchip
 *
 * Module Authors:	Simon Xue <xxm@rock-chips.com>
 *			Daniel Kurtz <djkurtz@chromium.org>
 */

#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* MMU register offsets */
#define RK_MMU_DTE_ADDR		0x00	/* Directory table address */
#define RK_MMU_STATUS		0x04
#define RK_MMU_COMMAND		0x08
#define RK_MMU_PAGE_FAULT_ADDR	0x0C	/* IOVA of last page fault */
#define RK_MMU_ZAP_ONE_LINE	0x10	/* Shootdown one IOTLB entry */
#define RK_MMU_INT_RAWSTAT	0x14	/* IRQ status ignoring mask */
#define RK_MMU_INT_CLEAR	0x18	/* Acknowledge and re-arm irq */
#define RK_MMU_INT_MASK		0x1C	/* IRQ enable */
#define RK_MMU_INT_STATUS	0x20	/* IRQ status after masking */
#define RK_MMU_AUTO_GATING	0x24

#define DTE_ADDR_DUMMY		0xCAFEBABE

#define RK_MMU_POLL_PERIOD_US		100
#define RK_MMU_FORCE_RESET_TIMEOUT_US	100000
#define RK_MMU_POLL_TIMEOUT_US		1000

/* RK_MMU_STATUS fields */
#define RK_MMU_STATUS_PAGING_ENABLED       BIT(0)
#define RK_MMU_STATUS_PAGE_FAULT_ACTIVE    BIT(1)
#define RK_MMU_STATUS_STALL_ACTIVE         BIT(2)
#define RK_MMU_STATUS_IDLE                 BIT(3)
#define RK_MMU_STATUS_REPLAY_BUFFER_EMPTY  BIT(4)
#define RK_MMU_STATUS_PAGE_FAULT_IS_WRITE  BIT(5)
#define RK_MMU_STATUS_STALL_NOT_ACTIVE     BIT(31)

/* RK_MMU_COMMAND command values */
#define RK_MMU_CMD_ENABLE_PAGING    0  /* Enable memory translation */
#define RK_MMU_CMD_DISABLE_PAGING   1  /* Disable memory translation */
#define RK_MMU_CMD_ENABLE_STALL     2  /* Stall paging to allow other cmds */
#define RK_MMU_CMD_DISABLE_STALL    3  /* Stop stalling; re-enables paging */
#define RK_MMU_CMD_ZAP_CACHE        4  /* Shoot down entire IOTLB */
#define RK_MMU_CMD_PAGE_FAULT_DONE  5  /* Clear page fault */
#define RK_MMU_CMD_FORCE_RESET      6  /* Reset all registers */

/* RK_MMU_INT_* register fields */
#define RK_MMU_IRQ_PAGE_FAULT    0x01  /* page fault */
#define RK_MMU_IRQ_BUS_ERROR     0x02  /* bus read error */
#define RK_MMU_IRQ_MASK          (RK_MMU_IRQ_PAGE_FAULT | RK_MMU_IRQ_BUS_ERROR)

#define NUM_DT_ENTRIES 1024
#define NUM_PT_ENTRIES 1024

#define SPAGE_ORDER 12
#define SPAGE_SIZE (1 << SPAGE_ORDER)

/*
 * Support mapping any size that fits in one page table:
 *   4 KiB to 4 MiB
 */
#define RK_IOMMU_PGSIZE_BITMAP 0x007ff000
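/*
 * Note: 0x007ff000 has bits [22:12] set, so every power-of-two size from
 * 4 KiB (bit 12) up to 4 MiB (bit 22) is advertised to the IOMMU core.
 */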

struct rk_iommu_domain {
	struct list_head iommus;
	u32 *dt; /* page directory table */
	dma_addr_t dt_dma;
	spinlock_t iommus_lock; /* lock for iommus list */
	spinlock_t dt_lock; /* lock for modifying page directory table */

	struct iommu_domain domain;
};

/* list of clocks required by IOMMU */
static const char * const rk_iommu_clocks[] = {
	"aclk", "iface",
};

struct rk_iommu {
	struct device *dev;
	void __iomem **bases;
	int num_mmu;
	struct clk_bulk_data *clocks;
	int num_clocks;
	bool reset_disabled;
	struct iommu_device iommu;
	struct list_head node; /* entry in rk_iommu_domain.iommus */
	struct iommu_domain *domain; /* domain to which iommu is attached */
	struct iommu_group *group;
};

struct rk_iommudata {
	struct device_link *link; /* runtime PM link from IOMMU to master */
	struct rk_iommu *iommu;
};

static struct device *dma_dev;

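/*
 * The directory and page tables are built by the CPU in normal kernel
 * memory, but the IOMMU walks them with plain memory reads. Updated
 * entries therefore have to be made visible with a streaming-DMA sync
 * before the hardware can observe them, which is what rk_table_flush()
 * does for 'count' 32-bit entries.
 */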
static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma,
				  unsigned int count)
{
	size_t size = count * sizeof(u32); /* count of u32 entry */

	dma_sync_single_for_device(dma_dev, dma, size, DMA_TO_DEVICE);
}

static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct rk_iommu_domain, domain);
}

/*
 * The Rockchip rk3288 iommu uses a 2-level page table.
 * The first level is the "Directory Table" (DT).
 * The DT consists of 1024 4-byte Directory Table Entries (DTEs), each
 * pointing to a "Page Table".
 * The second level consists of up to 1024 Page Tables (PTs).
 * Each PT consists of 1024 4-byte Page Table Entries (PTEs), each pointing
 * to a 4 KB page of physical memory.
 *
 * The DT and each PT fit in a single 4 KB page (4 bytes * 1024 entries).
 * Each iommu device has a MMU_DTE_ADDR register that contains the physical
 * address of the start of the DT page.
 *
 * The structure of the page table is as follows:
 *
 *                  DT
 * MMU_DTE_ADDR -> +-----+
 *                 |     |
 *                 +-----+     PT
 *                 | DTE | -> +-----+
 *                 +-----+    |     |     Memory
 *                 |     |    +-----+     Page
 *                 |     |    | PTE | -> +-----+
 *                 +-----+    +-----+    |     |
 *                            |     |    |     |
 *                            |     |    |     |
 *                            +-----+    |     |
 *                                       |     |
 *                                       |     |
 *                                       +-----+
 */
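/*
 * With 1024 DTEs, 1024 PTEs per PT and 4 KiB pages, a fully populated
 * table covers 1024 * 1024 * 4 KiB = 4 GiB, i.e. the full 32-bit IOVA
 * space exposed through the domain geometry below.
 */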

/*
 * Each DTE has a PT address and a valid bit:
 * +---------------------+-----------+-+
 * | PT address          | Reserved  |V|
 * +---------------------+-----------+-+
 *  31:12 - PT address (PTs always start on a 4 KB boundary)
 *  11: 1 - Reserved
 *      0 - 1 if PT @ PT address is valid
 */
#define RK_DTE_PT_ADDRESS_MASK    0xfffff000
#define RK_DTE_PT_VALID           BIT(0)

static inline phys_addr_t rk_dte_pt_address(u32 dte)
{
	return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK;
}

static inline bool rk_dte_is_pt_valid(u32 dte)
{
	return dte & RK_DTE_PT_VALID;
}

static inline u32 rk_mk_dte(dma_addr_t pt_dma)
{
	return (pt_dma & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID;
}

/*
 * Each PTE has a Page address, some flags and a valid bit:
 * +---------------------+---+-------+-+
 * | Page address        |Rsv| Flags |V|
 * +---------------------+---+-------+-+
 *  31:12 - Page address (Pages always start on a 4 KB boundary)
 *  11: 9 - Reserved
 *   8: 1 - Flags
 *      8 - Read allocate - allocate cache space on read misses
 *      7 - Read cache - enable cache & prefetch of data
 *      6 - Write buffer - enable delaying writes on their way to memory
 *      5 - Write allocate - allocate cache space on write misses
 *      4 - Write cache - different writes can be merged together
 *      3 - Override cache attributes
 *          if 1, bits 4-8 control cache attributes
 *          if 0, the system bus defaults are used
 *      2 - Writable
 *      1 - Readable
 *      0 - 1 if Page @ Page address is valid
 */
#define RK_PTE_PAGE_ADDRESS_MASK  0xfffff000
#define RK_PTE_PAGE_FLAGS_MASK    0x000001fe
#define RK_PTE_PAGE_WRITABLE      BIT(2)
#define RK_PTE_PAGE_READABLE      BIT(1)
#define RK_PTE_PAGE_VALID         BIT(0)

static inline phys_addr_t rk_pte_page_address(u32 pte)
{
	return (phys_addr_t)pte & RK_PTE_PAGE_ADDRESS_MASK;
}

static inline bool rk_pte_is_page_valid(u32 pte)
{
	return pte & RK_PTE_PAGE_VALID;
}

/* TODO: set cache flags per prot IOMMU_CACHE */
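/*
 * Worked example (illustrative values): rk_mk_pte(0x12345000,
 * IOMMU_READ | IOMMU_WRITE) returns 0x12345007: page address 0x12345000
 * with the readable (bit 1), writable (bit 2) and valid (bit 0) flags set.
 */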
static u32 rk_mk_pte(phys_addr_t page, int prot)
{
	u32 flags = 0;
	flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0;
	flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;
	page &= RK_PTE_PAGE_ADDRESS_MASK;
	return page | flags | RK_PTE_PAGE_VALID;
}

static u32 rk_mk_pte_invalid(u32 pte)
{
	return pte & ~RK_PTE_PAGE_VALID;
}

/*
 * rk3288 iova (IOMMU Virtual Address) format
 *  31       22.21       12.11          0
 * +-----------+-----------+-------------+
 * | DTE index | PTE index | Page offset |
 * +-----------+-----------+-------------+
 *  31:22 - DTE index   - index of DTE in DT
 *  21:12 - PTE index   - index of PTE in PT @ DTE.pt_address
 *  11: 0 - Page offset - offset into page @ PTE.page_address
 */
#define RK_IOVA_DTE_MASK    0xffc00000
#define RK_IOVA_DTE_SHIFT   22
#define RK_IOVA_PTE_MASK    0x003ff000
#define RK_IOVA_PTE_SHIFT   12
#define RK_IOVA_PAGE_MASK   0x00000fff
#define RK_IOVA_PAGE_SHIFT  0
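/*
 * Worked example (illustrative value): iova 0x00501234 decomposes into
 * dte_index 0x001, pte_index 0x101 and page_offset 0x234.
 */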

static u32 rk_iova_dte_index(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT;
}

static u32 rk_iova_pte_index(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT;
}

static u32 rk_iova_page_offset(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT;
}

static u32 rk_iommu_read(void __iomem *base, u32 offset)
{
	return readl(base + offset);
}

static void rk_iommu_write(void __iomem *base, u32 offset, u32 value)
{
	writel(value, base + offset);
}

static void rk_iommu_command(struct rk_iommu *iommu, u32 command)
{
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		writel(command, iommu->bases[i] + RK_MMU_COMMAND);
}

static void rk_iommu_base_command(void __iomem *base, u32 command)
{
	writel(command, base + RK_MMU_COMMAND);
}

static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova_start,
			       size_t size)
{
	int i;
	dma_addr_t iova_end = iova_start + size;
	/*
	 * TODO(djkurtz): Figure out when it is more efficient to shootdown the
	 * entire iotlb rather than iterate over individual iovas.
	 */
	for (i = 0; i < iommu->num_mmu; i++) {
		dma_addr_t iova;

		for (iova = iova_start; iova < iova_end; iova += SPAGE_SIZE)
			rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova);
	}
}

static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
{
	bool active = true;
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
					   RK_MMU_STATUS_STALL_ACTIVE);

	return active;
}

static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
{
	bool enable = true;
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		enable &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
					   RK_MMU_STATUS_PAGING_ENABLED);

	return enable;
}

static bool rk_iommu_is_reset_done(struct rk_iommu *iommu)
{
	bool done = true;
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		done &= rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR) == 0;

	return done;
}

static int rk_iommu_enable_stall(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (rk_iommu_is_stall_active(iommu))
		return 0;

	/* Stall can only be enabled if paging is enabled */
	if (!rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL);

	ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
				 val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_disable_stall(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (!rk_iommu_is_stall_active(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL);

	ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
				 !val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_enable_paging(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING);

	ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
				 val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_disable_paging(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (!rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING);

	ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
				 !val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_force_reset(struct rk_iommu *iommu)
{
	int ret, i;
	u32 dte_addr;
	bool val;

	if (iommu->reset_disabled)
		return 0;

	/*
	 * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY
	 * and verifying that upper 5 nybbles are read back.
	 */
	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);

		dte_addr = rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR);
		if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) {
			dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
			return -EFAULT;
		}
	}

	rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);

	ret = readx_poll_timeout(rk_iommu_is_reset_done, iommu, val,
				 val, RK_MMU_FORCE_RESET_TIMEOUT_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret) {
		dev_err(iommu->dev, "FORCE_RESET command timed out\n");
		return ret;
	}

	return 0;
}

static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
{
	void __iomem *base = iommu->bases[index];
	u32 dte_index, pte_index, page_offset;
	u32 mmu_dte_addr;
	phys_addr_t mmu_dte_addr_phys, dte_addr_phys;
	u32 *dte_addr;
	u32 dte;
	phys_addr_t pte_addr_phys = 0;
	u32 *pte_addr = NULL;
	u32 pte = 0;
	phys_addr_t page_addr_phys = 0;
	u32 page_flags = 0;

	dte_index = rk_iova_dte_index(iova);
	pte_index = rk_iova_pte_index(iova);
	page_offset = rk_iova_page_offset(iova);

	mmu_dte_addr = rk_iommu_read(base, RK_MMU_DTE_ADDR);
	mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr;

	dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
	dte_addr = phys_to_virt(dte_addr_phys);
	dte = *dte_addr;

	if (!rk_dte_is_pt_valid(dte))
		goto print_it;

	pte_addr_phys = rk_dte_pt_address(dte) + (pte_index * 4);
	pte_addr = phys_to_virt(pte_addr_phys);
	pte = *pte_addr;

	if (!rk_pte_is_page_valid(pte))
		goto print_it;

	page_addr_phys = rk_pte_page_address(pte) + page_offset;
	page_flags = pte & RK_PTE_PAGE_FLAGS_MASK;

print_it:
	dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n",
		&iova, dte_index, pte_index, page_offset);
	dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n",
		&mmu_dte_addr_phys, &dte_addr_phys, dte,
		rk_dte_is_pt_valid(dte), &pte_addr_phys, pte,
		rk_pte_is_page_valid(pte), &page_addr_phys, page_flags);
}

static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
{
	struct rk_iommu *iommu = dev_id;
	u32 status;
	u32 int_status;
	dma_addr_t iova;
	irqreturn_t ret = IRQ_NONE;
	int i, err;

	err = pm_runtime_get_if_in_use(iommu->dev);
	if (WARN_ON_ONCE(err <= 0))
		return ret;

	if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)))
		goto out;

	for (i = 0; i < iommu->num_mmu; i++) {
		int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);
		if (int_status == 0)
			continue;

		ret = IRQ_HANDLED;
		iova = rk_iommu_read(iommu->bases[i], RK_MMU_PAGE_FAULT_ADDR);

		if (int_status & RK_MMU_IRQ_PAGE_FAULT) {
			int flags;

			status = rk_iommu_read(iommu->bases[i], RK_MMU_STATUS);
			flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ?
					IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

			dev_err(iommu->dev, "Page fault at %pad of type %s\n",
				&iova,
				(flags == IOMMU_FAULT_WRITE) ? "write" : "read");

			log_iova(iommu, i, iova);

			/*
			 * Report page fault to any installed handlers.
			 * Ignore the return code, though, since we always zap cache
			 * and clear the page fault anyway.
			 */
			if (iommu->domain)
				report_iommu_fault(iommu->domain, iommu->dev, iova,
						   flags);
			else
				dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");

			rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
			rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
		}

		if (int_status & RK_MMU_IRQ_BUS_ERROR)
			dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);

		if (int_status & ~RK_MMU_IRQ_MASK)
			dev_err(iommu->dev, "unexpected int_status: %#08x\n",
				int_status);

		rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status);
	}

	clk_bulk_disable(iommu->num_clocks, iommu->clocks);

out:
	pm_runtime_put(iommu->dev);
	return ret;
}

static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	phys_addr_t pt_phys, phys = 0;
	u32 dte, pte;
	u32 *page_table;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);

	dte = rk_domain->dt[rk_iova_dte_index(iova)];
	if (!rk_dte_is_pt_valid(dte))
		goto out;

	pt_phys = rk_dte_pt_address(dte);
	page_table = (u32 *)phys_to_virt(pt_phys);
	pte = page_table[rk_iova_pte_index(iova)];
	if (!rk_pte_is_page_valid(pte))
		goto out;

	phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova);
out:
	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	return phys;
}

static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
			      dma_addr_t iova, size_t size)
{
	struct list_head *pos;
	unsigned long flags;

	/* shootdown these iova from all iommus using this domain */
	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_for_each(pos, &rk_domain->iommus) {
		struct rk_iommu *iommu;
		int ret;

		iommu = list_entry(pos, struct rk_iommu, node);

		/* Only zap TLBs of IOMMUs that are powered on. */
		ret = pm_runtime_get_if_in_use(iommu->dev);
		if (WARN_ON_ONCE(ret < 0))
			continue;
		if (ret) {
			WARN_ON(clk_bulk_enable(iommu->num_clocks,
						iommu->clocks));
			rk_iommu_zap_lines(iommu, iova, size);
			clk_bulk_disable(iommu->num_clocks, iommu->clocks);
			pm_runtime_put(iommu->dev);
		}
	}
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
}

static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain,
					 dma_addr_t iova, size_t size)
{
	rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);
	if (size > SPAGE_SIZE)
		rk_iommu_zap_iova(rk_domain, iova + size - SPAGE_SIZE,
				  SPAGE_SIZE);
}

static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
				  dma_addr_t iova)
{
	u32 *page_table, *dte_addr;
	u32 dte_index, dte;
	phys_addr_t pt_phys;
	dma_addr_t pt_dma;

	assert_spin_locked(&rk_domain->dt_lock);

	dte_index = rk_iova_dte_index(iova);
	dte_addr = &rk_domain->dt[dte_index];
	dte = *dte_addr;
	if (rk_dte_is_pt_valid(dte))
		goto done;

	page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
	if (!page_table)
		return ERR_PTR(-ENOMEM);

	pt_dma = dma_map_single(dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, pt_dma)) {
		dev_err(dma_dev, "DMA mapping error while allocating page table\n");
		free_page((unsigned long)page_table);
		return ERR_PTR(-ENOMEM);
	}

	dte = rk_mk_dte(pt_dma);
	*dte_addr = dte;

	rk_table_flush(rk_domain, pt_dma, NUM_PT_ENTRIES);
	rk_table_flush(rk_domain,
		       rk_domain->dt_dma + dte_index * sizeof(u32), 1);
done:
	pt_phys = rk_dte_pt_address(dte);
	return (u32 *)phys_to_virt(pt_phys);
}

static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain,
				  u32 *pte_addr, dma_addr_t pte_dma,
				  size_t size)
{
	unsigned int pte_count;
	unsigned int pte_total = size / SPAGE_SIZE;

	assert_spin_locked(&rk_domain->dt_lock);

	for (pte_count = 0; pte_count < pte_total; pte_count++) {
		u32 pte = pte_addr[pte_count];
		if (!rk_pte_is_page_valid(pte))
			break;

		pte_addr[pte_count] = rk_mk_pte_invalid(pte);
	}

	rk_table_flush(rk_domain, pte_dma, pte_count);

	return pte_count * SPAGE_SIZE;
}

static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
			     dma_addr_t pte_dma, dma_addr_t iova,
			     phys_addr_t paddr, size_t size, int prot)
{
	unsigned int pte_count;
	unsigned int pte_total = size / SPAGE_SIZE;
	phys_addr_t page_phys;

	assert_spin_locked(&rk_domain->dt_lock);

	for (pte_count = 0; pte_count < pte_total; pte_count++) {
		u32 pte = pte_addr[pte_count];

		if (rk_pte_is_page_valid(pte))
			goto unwind;

		pte_addr[pte_count] = rk_mk_pte(paddr, prot);

		paddr += SPAGE_SIZE;
	}

	rk_table_flush(rk_domain, pte_dma, pte_total);

	/*
	 * Zap the first and last iova to evict from iotlb any previously
	 * mapped cachelines holding stale values for its dte and pte.
	 * We only zap the first and last iova, since only they could have
	 * dte or pte shared with an existing mapping.
	 */
	rk_iommu_zap_iova_first_last(rk_domain, iova, size);

	return 0;
unwind:
	/* Unmap the range of iovas that we just mapped */
	rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma,
			    pte_count * SPAGE_SIZE);

	iova += pte_count * SPAGE_SIZE;
	page_phys = rk_pte_page_address(pte_addr[pte_count]);
	pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n",
	       &iova, &page_phys, &paddr, prot);

	return -EADDRINUSE;
}

static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
			phys_addr_t paddr, size_t size, int prot)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
	u32 *page_table, *pte_addr;
	u32 dte_index, pte_index;
	int ret;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);

	/*
	 * pgsize_bitmap specifies iova sizes that fit in one page table
	 * (1024 4-KiB pages = 4 MiB).
	 * So, size will always be 4096 <= size <= 4194304.
	 * Since iommu_map() guarantees that both iova and size will be
	 * aligned, we will always only be mapping from a single dte here.
	 */
	page_table = rk_dte_get_page_table(rk_domain, iova);
	if (IS_ERR(page_table)) {
		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
		return PTR_ERR(page_table);
	}

	dte_index = rk_domain->dt[rk_iova_dte_index(iova)];
	pte_index = rk_iova_pte_index(iova);
	pte_addr = &page_table[pte_index];
	pte_dma = rk_dte_pt_address(dte_index) + pte_index * sizeof(u32);
	ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova,
				paddr, size, prot);

	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	return ret;
}

static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
			     size_t size, struct iommu_iotlb_gather *gather)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
	phys_addr_t pt_phys;
	u32 dte;
	u32 *pte_addr;
	size_t unmap_size;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);

	/*
	 * pgsize_bitmap specifies iova sizes that fit in one page table
	 * (1024 4-KiB pages = 4 MiB).
	 * So, size will always be 4096 <= size <= 4194304.
	 * Since iommu_unmap() guarantees that both iova and size will be
	 * aligned, we will always only be unmapping from a single dte here.
	 */
	dte = rk_domain->dt[rk_iova_dte_index(iova)];
	/* Just return 0 if iova is unmapped */
	if (!rk_dte_is_pt_valid(dte)) {
		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
		return 0;
	}

	pt_phys = rk_dte_pt_address(dte);
	pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
	pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
	unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size);

	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	/* Shootdown iotlb entries for iova range that was just unmapped */
	rk_iommu_zap_iova(rk_domain, iova, unmap_size);

	return unmap_size;
}

static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
{
	struct rk_iommudata *data = dev->archdata.iommu;

	return data ? data->iommu : NULL;
}

/* Must be called with iommu powered on and attached */
static void rk_iommu_disable(struct rk_iommu *iommu)
{
	int i;

	/* Ignore error while disabling, just keep going */
	WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
	rk_iommu_enable_stall(iommu);
	rk_iommu_disable_paging(iommu);
	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
	}
	rk_iommu_disable_stall(iommu);
	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
}

/* Must be called with iommu powered on and attached */
static int rk_iommu_enable(struct rk_iommu *iommu)
{
	struct iommu_domain *domain = iommu->domain;
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	int ret, i;

	ret = clk_bulk_enable(iommu->num_clocks, iommu->clocks);
	if (ret)
		return ret;

	ret = rk_iommu_enable_stall(iommu);
	if (ret)
		goto out_disable_clocks;

	ret = rk_iommu_force_reset(iommu);
	if (ret)
		goto out_disable_stall;

	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
			       rk_domain->dt_dma);
		rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
	}

	ret = rk_iommu_enable_paging(iommu);

out_disable_stall:
	rk_iommu_disable_stall(iommu);
out_disable_clocks:
	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
	return ret;
}

static void rk_iommu_detach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct rk_iommu *iommu;
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	int ret;

	/* Allow 'virtual devices' (eg drm) to detach from domain */
	iommu = rk_iommu_from_dev(dev);
	if (!iommu)
		return;

	dev_dbg(dev, "Detaching from iommu domain\n");

	/* iommu already detached */
	if (iommu->domain != domain)
		return;

	iommu->domain = NULL;

	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_del_init(&iommu->node);
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);

	ret = pm_runtime_get_if_in_use(iommu->dev);
	WARN_ON_ONCE(ret < 0);
	if (ret > 0) {
		rk_iommu_disable(iommu);
		pm_runtime_put(iommu->dev);
	}
}

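/*
 * If the IOMMU is runtime-suspended when a master attaches, only the
 * software state is updated below; the hardware is programmed later by
 * rk_iommu_resume() once the shared power domain is powered up again.
 */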
static int rk_iommu_attach_device(struct iommu_domain *domain,
		struct device *dev)
{
	struct rk_iommu *iommu;
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	int ret;

	/*
	 * Allow 'virtual devices' (e.g., drm) to attach to domain.
	 * Such a device does not belong to an iommu group.
	 */
	iommu = rk_iommu_from_dev(dev);
	if (!iommu)
		return 0;

	dev_dbg(dev, "Attaching to iommu domain\n");

	/* iommu already attached */
	if (iommu->domain == domain)
		return 0;

	if (iommu->domain)
		rk_iommu_detach_device(iommu->domain, dev);

	iommu->domain = domain;

	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_add_tail(&iommu->node, &rk_domain->iommus);
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);

	ret = pm_runtime_get_if_in_use(iommu->dev);
	if (!ret || WARN_ON_ONCE(ret < 0))
		return 0;

	ret = rk_iommu_enable(iommu);
	if (ret)
		rk_iommu_detach_device(iommu->domain, dev);

	pm_runtime_put(iommu->dev);

	return ret;
}

static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
{
	struct rk_iommu_domain *rk_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;

	if (!dma_dev)
		return NULL;

	rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL);
	if (!rk_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&rk_domain->domain))
		goto err_free_domain;

	/*
	 * rk32xx iommus use a 2-level pagetable.
	 * Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
	 * Allocate one 4 KiB page for each table.
	 */
	rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
	if (!rk_domain->dt)
		goto err_put_cookie;

	rk_domain->dt_dma = dma_map_single(dma_dev, rk_domain->dt,
					   SPAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, rk_domain->dt_dma)) {
		dev_err(dma_dev, "DMA map error for DT\n");
		goto err_free_dt;
	}

	rk_table_flush(rk_domain, rk_domain->dt_dma, NUM_DT_ENTRIES);

	spin_lock_init(&rk_domain->iommus_lock);
	spin_lock_init(&rk_domain->dt_lock);
	INIT_LIST_HEAD(&rk_domain->iommus);

	rk_domain->domain.geometry.aperture_start = 0;
	rk_domain->domain.geometry.aperture_end   = DMA_BIT_MASK(32);
	rk_domain->domain.geometry.force_aperture = true;

	return &rk_domain->domain;

err_free_dt:
	free_page((unsigned long)rk_domain->dt);
err_put_cookie:
	if (type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(&rk_domain->domain);
err_free_domain:
	kfree(rk_domain);

	return NULL;
}

static void rk_iommu_domain_free(struct iommu_domain *domain)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	int i;

	WARN_ON(!list_empty(&rk_domain->iommus));

	for (i = 0; i < NUM_DT_ENTRIES; i++) {
		u32 dte = rk_domain->dt[i];
		if (rk_dte_is_pt_valid(dte)) {
			phys_addr_t pt_phys = rk_dte_pt_address(dte);
			u32 *page_table = phys_to_virt(pt_phys);
			dma_unmap_single(dma_dev, pt_phys,
					 SPAGE_SIZE, DMA_TO_DEVICE);
			free_page((unsigned long)page_table);
		}
	}

	dma_unmap_single(dma_dev, rk_domain->dt_dma,
			 SPAGE_SIZE, DMA_TO_DEVICE);
	free_page((unsigned long)rk_domain->dt);

	if (domain->type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(&rk_domain->domain);
	kfree(rk_domain);
}

static int rk_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;
	struct rk_iommu *iommu;
	struct rk_iommudata *data;

	data = dev->archdata.iommu;
	if (!data)
		return -ENODEV;

	iommu = rk_iommu_from_dev(dev);

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);
	iommu_group_put(group);

	iommu_device_link(&iommu->iommu, dev);
	data->link = device_link_add(dev, iommu->dev,
				     DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);

	return 0;
}

static void rk_iommu_remove_device(struct device *dev)
{
	struct rk_iommu *iommu;
	struct rk_iommudata *data = dev->archdata.iommu;

	iommu = rk_iommu_from_dev(dev);

	device_link_del(data->link);
	iommu_device_unlink(&iommu->iommu, dev);
	iommu_group_remove_device(dev);
}

static struct iommu_group *rk_iommu_device_group(struct device *dev)
{
	struct rk_iommu *iommu;

	iommu = rk_iommu_from_dev(dev);

	return iommu_group_ref_get(iommu->group);
}

static int rk_iommu_of_xlate(struct device *dev,
			     struct of_phandle_args *args)
{
	struct platform_device *iommu_dev;
	struct rk_iommudata *data;

	data = devm_kzalloc(dma_dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	iommu_dev = of_find_device_by_node(args->np);

	data->iommu = platform_get_drvdata(iommu_dev);
	dev->archdata.iommu = data;

	platform_device_put(iommu_dev);

	return 0;
}

static const struct iommu_ops rk_iommu_ops = {
	.domain_alloc = rk_iommu_domain_alloc,
	.domain_free = rk_iommu_domain_free,
	.attach_dev = rk_iommu_attach_device,
	.detach_dev = rk_iommu_detach_device,
	.map = rk_iommu_map,
	.unmap = rk_iommu_unmap,
	.add_device = rk_iommu_add_device,
	.remove_device = rk_iommu_remove_device,
	.iova_to_phys = rk_iommu_iova_to_phys,
	.device_group = rk_iommu_device_group,
	.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
	.of_xlate = rk_iommu_of_xlate,
};

static int rk_iommu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rk_iommu *iommu;
	struct resource *res;
	int num_res = pdev->num_resources;
	int err, i, irq;

	iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	platform_set_drvdata(pdev, iommu);
	iommu->dev = dev;
	iommu->num_mmu = 0;

	iommu->bases = devm_kcalloc(dev, num_res, sizeof(*iommu->bases),
				    GFP_KERNEL);
	if (!iommu->bases)
		return -ENOMEM;

	for (i = 0; i < num_res; i++) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
		if (!res)
			continue;
		iommu->bases[i] = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(iommu->bases[i]))
			continue;
		iommu->num_mmu++;
	}
	if (iommu->num_mmu == 0)
		return PTR_ERR(iommu->bases[0]);

	iommu->reset_disabled = device_property_read_bool(dev,
					"rockchip,disable-mmu-reset");

	iommu->num_clocks = ARRAY_SIZE(rk_iommu_clocks);
	iommu->clocks = devm_kcalloc(iommu->dev, iommu->num_clocks,
				     sizeof(*iommu->clocks), GFP_KERNEL);
	if (!iommu->clocks)
		return -ENOMEM;

	for (i = 0; i < iommu->num_clocks; ++i)
		iommu->clocks[i].id = rk_iommu_clocks[i];

	/*
	 * iommu clocks should be present for all new devices and devicetrees
	 * but there are older devicetrees without clocks out in the wild.
	 * So treat clocks as optional for the time being.
	 */
	err = devm_clk_bulk_get(iommu->dev, iommu->num_clocks, iommu->clocks);
	if (err == -ENOENT)
		iommu->num_clocks = 0;
	else if (err)
		return err;

	err = clk_bulk_prepare(iommu->num_clocks, iommu->clocks);
	if (err)
		return err;

	iommu->group = iommu_group_alloc();
	if (IS_ERR(iommu->group)) {
		err = PTR_ERR(iommu->group);
		goto err_unprepare_clocks;
	}

	err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev));
	if (err)
		goto err_put_group;

	iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops);
	iommu_device_set_fwnode(&iommu->iommu, &dev->of_node->fwnode);

	err = iommu_device_register(&iommu->iommu);
	if (err)
		goto err_remove_sysfs;

	/*
	 * Use the first registered IOMMU device for the domain to use with
	 * the DMA API, since a domain might not physically correspond to a
	 * single IOMMU device.
	 */
	if (!dma_dev)
		dma_dev = &pdev->dev;

	bus_set_iommu(&platform_bus_type, &rk_iommu_ops);

	pm_runtime_enable(dev);

	i = 0;
	while ((irq = platform_get_irq(pdev, i++)) != -ENXIO) {
		if (irq < 0)
			return irq;

		err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
				       IRQF_SHARED, dev_name(dev), iommu);
		if (err) {
			pm_runtime_disable(dev);
			goto err_remove_sysfs;
		}
	}

	return 0;
err_remove_sysfs:
	iommu_device_sysfs_remove(&iommu->iommu);
err_put_group:
	iommu_group_put(iommu->group);
err_unprepare_clocks:
	clk_bulk_unprepare(iommu->num_clocks, iommu->clocks);
	return err;
}

static void rk_iommu_shutdown(struct platform_device *pdev)
{
	struct rk_iommu *iommu = platform_get_drvdata(pdev);
	int i = 0, irq;

	while ((irq = platform_get_irq(pdev, i++)) != -ENXIO)
		devm_free_irq(iommu->dev, irq, iommu);

	pm_runtime_force_suspend(&pdev->dev);
}

static int __maybe_unused rk_iommu_suspend(struct device *dev)
{
	struct rk_iommu *iommu = dev_get_drvdata(dev);

	if (!iommu->domain)
		return 0;

	rk_iommu_disable(iommu);
	return 0;
}

static int __maybe_unused rk_iommu_resume(struct device *dev)
{
	struct rk_iommu *iommu = dev_get_drvdata(dev);

	if (!iommu->domain)
		return 0;

	return rk_iommu_enable(iommu);
}

static const struct dev_pm_ops rk_iommu_pm_ops = {
	SET_RUNTIME_PM_OPS(rk_iommu_suspend, rk_iommu_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

static const struct of_device_id rk_iommu_dt_ids[] = {
	{ .compatible = "rockchip,iommu" },
	{ /* sentinel */ }
};
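/*
 * A minimal sketch of a matching devicetree node; the unit address, reg,
 * interrupt and clock specifiers below are illustrative only, not taken
 * from a real board:
 *
 *	iommu@ff940300 {
 *		compatible = "rockchip,iommu";
 *		reg = <0x0 0xff940300 0x0 0x100>;
 *		interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
 *		clocks = <&cru ACLK_EXAMPLE>, <&cru HCLK_EXAMPLE>;
 *		clock-names = "aclk", "iface";
 *		#iommu-cells = <0>;
 *	};
 */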

static struct platform_driver rk_iommu_driver = {
	.probe = rk_iommu_probe,
	.shutdown = rk_iommu_shutdown,
	.driver = {
		   .name = "rk_iommu",
		   .of_match_table = rk_iommu_dt_ids,
		   .pm = &rk_iommu_pm_ops,
		   .suppress_bind_attrs = true,
	},
};

static int __init rk_iommu_init(void)
{
	return platform_driver_register(&rk_iommu_driver);
}
subsys_initcall(rk_iommu_init);