// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for Rockchip
 *
 * Module Authors:	Simon Xue <xxm@rock-chips.com>
 *			Daniel Kurtz <djkurtz@chromium.org>
 */

#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* MMU register offsets */
#define RK_MMU_DTE_ADDR		0x00	/* Directory table address */
#define RK_MMU_STATUS		0x04
#define RK_MMU_COMMAND		0x08
#define RK_MMU_PAGE_FAULT_ADDR	0x0C	/* IOVA of last page fault */
#define RK_MMU_ZAP_ONE_LINE	0x10	/* Shootdown one IOTLB entry */
#define RK_MMU_INT_RAWSTAT	0x14	/* IRQ status ignoring mask */
#define RK_MMU_INT_CLEAR	0x18	/* Acknowledge and re-arm irq */
#define RK_MMU_INT_MASK		0x1C	/* IRQ enable */
#define RK_MMU_INT_STATUS	0x20	/* IRQ status after masking */
#define RK_MMU_AUTO_GATING	0x24

#define DTE_ADDR_DUMMY		0xCAFEBABE

#define RK_MMU_POLL_PERIOD_US		100
#define RK_MMU_FORCE_RESET_TIMEOUT_US	100000
#define RK_MMU_POLL_TIMEOUT_US		1000

/* RK_MMU_STATUS fields */
#define RK_MMU_STATUS_PAGING_ENABLED       BIT(0)
#define RK_MMU_STATUS_PAGE_FAULT_ACTIVE    BIT(1)
#define RK_MMU_STATUS_STALL_ACTIVE         BIT(2)
#define RK_MMU_STATUS_IDLE                 BIT(3)
#define RK_MMU_STATUS_REPLAY_BUFFER_EMPTY  BIT(4)
#define RK_MMU_STATUS_PAGE_FAULT_IS_WRITE  BIT(5)
#define RK_MMU_STATUS_STALL_NOT_ACTIVE     BIT(31)

/* RK_MMU_COMMAND command values */
#define RK_MMU_CMD_ENABLE_PAGING    0  /* Enable memory translation */
#define RK_MMU_CMD_DISABLE_PAGING   1  /* Disable memory translation */
#define RK_MMU_CMD_ENABLE_STALL     2  /* Stall paging to allow other cmds */
#define RK_MMU_CMD_DISABLE_STALL    3  /* Stop stall, re-enabling paging */
#define RK_MMU_CMD_ZAP_CACHE        4  /* Shoot down entire IOTLB */
#define RK_MMU_CMD_PAGE_FAULT_DONE  5  /* Clear page fault */
#define RK_MMU_CMD_FORCE_RESET      6  /* Reset all registers */

/* RK_MMU_INT_* register fields */
#define RK_MMU_IRQ_PAGE_FAULT    0x01  /* page fault */
#define RK_MMU_IRQ_BUS_ERROR     0x02  /* bus read error */
#define RK_MMU_IRQ_MASK          (RK_MMU_IRQ_PAGE_FAULT | RK_MMU_IRQ_BUS_ERROR)

#define NUM_DT_ENTRIES 1024
#define NUM_PT_ENTRIES 1024

#define SPAGE_ORDER 12
#define SPAGE_SIZE (1 << SPAGE_ORDER)

/*
 * Support mapping any size that fits in one page table:
 * 4 KiB to 4 MiB
 */
#define RK_IOMMU_PGSIZE_BITMAP 0x007ff000
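
/*
 * Note on the value above: bits 12..22 are set, one bit per power-of-two
 * size from 4 KiB (1 << 12) up to 4 MiB (1 << 22), so the IOMMU core can
 * split any mapping request into chunks of these sizes.
 */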

struct rk_iommu_domain {
	struct list_head iommus;
	u32 *dt; /* page directory table */
	dma_addr_t dt_dma;
	spinlock_t iommus_lock; /* lock for iommus list */
	spinlock_t dt_lock; /* lock for modifying page directory table */

	struct iommu_domain domain;
};

/* list of clocks required by IOMMU */
static const char * const rk_iommu_clocks[] = {
	"aclk", "iface",
};

struct rk_iommu_ops {
	phys_addr_t (*pt_address)(u32 dte);
	u32 (*mk_dtentries)(dma_addr_t pt_dma);
	u32 (*mk_ptentries)(phys_addr_t page, int prot);
	phys_addr_t (*dte_addr_phys)(u32 addr);
	u32 (*dma_addr_dte)(dma_addr_t dt_dma);
	u64 dma_bit_mask;
};

struct rk_iommu {
	struct device *dev;
	void __iomem **bases;
	int num_mmu;
	int num_irq;
	struct clk_bulk_data *clocks;
	int num_clocks;
	bool reset_disabled;
	struct iommu_device iommu;
	struct list_head node; /* entry in rk_iommu_domain.iommus */
	struct iommu_domain *domain; /* domain to which iommu is attached */
	struct iommu_group *group;
};

struct rk_iommudata {
	struct device_link *link; /* runtime PM link from IOMMU to master */
	struct rk_iommu *iommu;
};

static struct device *dma_dev;
static const struct rk_iommu_ops *rk_ops;

static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma,
				  unsigned int count)
{
	size_t size = count * sizeof(u32); /* count of u32 entry */

	dma_sync_single_for_device(dma_dev, dma, size, DMA_TO_DEVICE);
}

static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct rk_iommu_domain, domain);
}

/*
 * The Rockchip rk3288 iommu uses a 2-level page table.
 * The first level is the "Directory Table" (DT).
 * The DT consists of 1024 4-byte Directory Table Entries (DTEs), each pointing
 * to a "Page Table".
 * The second level consists of 1024 Page Tables (PTs).
 * Each PT consists of 1024 4-byte Page Table Entries (PTEs), each pointing to
 * a 4 KB page of physical memory.
 *
 * The DT and each PT fit in a single 4 KB page (4 bytes * 1024 entries).
 * Each iommu device has an MMU_DTE_ADDR register that contains the physical
 * address of the start of the DT page.
 *
 * The structure of the page table is as follows:
 *
 *                   DT
 * MMU_DTE_ADDR -> +-----+
 *                 |     |
 *                 +-----+     PT
 *                 | DTE | -> +-----+
 *                 +-----+    |     |     Memory
 *                 |     |    +-----+     Page
 *                 |     |    | PTE | -> +-----+
 *                 +-----+    +-----+    |     |
 *                            |     |    |     |
 *                            |     |    |     |
 *                            +-----+    |     |
 *                                       |     |
 *                                       |     |
 *                                       +-----+
 */
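
/*
 * Arithmetic check on the layout above: one PT maps 1024 * 4 KiB = 4 MiB
 * of iova space, and the DT covers 1024 * 4 MiB = 4 GiB, i.e. the whole
 * 32-bit iova range.
 */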

/*
 * Each DTE has a PT address and a valid bit:
 * +---------------------+-----------+-+
 * | PT address          | Reserved  |V|
 * +---------------------+-----------+-+
 *  31:12 - PT address (PTs always start on a 4 KB boundary)
 *  11: 1 - Reserved
 *      0 - 1 if PT @ PT address is valid
 */
#define RK_DTE_PT_ADDRESS_MASK    0xfffff000
#define RK_DTE_PT_VALID           BIT(0)

static inline phys_addr_t rk_dte_pt_address(u32 dte)
{
	return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK;
}

/*
 * In v2:
 * 31:12 - PT address bit 31:0
 * 11: 8 - PT address bit 35:32
 *  7: 4 - PT address bit 39:36
 *  3: 1 - Reserved
 *     0 - 1 if PT @ PT address is valid
 */
#define RK_DTE_PT_ADDRESS_MASK_V2 GENMASK_ULL(31, 4)
#define DTE_HI_MASK1	GENMASK(11, 8)
#define DTE_HI_MASK2	GENMASK(7, 4)
#define DTE_HI_SHIFT1	24 /* shift bit 8 to bit 32 */
#define DTE_HI_SHIFT2	32 /* shift bit 4 to bit 36 */
#define PAGE_DESC_HI_MASK1	GENMASK_ULL(35, 32)
#define PAGE_DESC_HI_MASK2	GENMASK_ULL(39, 36)

static inline phys_addr_t rk_dte_pt_address_v2(u32 dte)
{
	u64 dte_v2 = dte;

	dte_v2 = ((dte_v2 & DTE_HI_MASK2) << DTE_HI_SHIFT2) |
		 ((dte_v2 & DTE_HI_MASK1) << DTE_HI_SHIFT1) |
		 (dte_v2 & RK_DTE_PT_ADDRESS_MASK);

	return (phys_addr_t)dte_v2;
}
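
/*
 * Worked example (hypothetical dte): 0x12345671 has address bits 31:12 =
 * 0x12345000, bits 11:8 = 0x6 (physical bits 35:32) and bits 7:4 = 0x7
 * (physical bits 39:36), so the page table lives at 0x7612345000.
 */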

static inline bool rk_dte_is_pt_valid(u32 dte)
{
	return dte & RK_DTE_PT_VALID;
}

static inline u32 rk_mk_dte(dma_addr_t pt_dma)
{
	return (pt_dma & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID;
}

static inline u32 rk_mk_dte_v2(dma_addr_t pt_dma)
{
	pt_dma = (pt_dma & RK_DTE_PT_ADDRESS_MASK) |
		 ((pt_dma & PAGE_DESC_HI_MASK1) >> DTE_HI_SHIFT1) |
		 (pt_dma & PAGE_DESC_HI_MASK2) >> DTE_HI_SHIFT2;

	return (pt_dma & RK_DTE_PT_ADDRESS_MASK_V2) | RK_DTE_PT_VALID;
}
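
/*
 * And the inverse (hypothetical): rk_mk_dte_v2(0x7612345000) packs the
 * address into 0x12345670 and sets the valid bit, returning 0x12345671,
 * exactly undoing rk_dte_pt_address_v2() above.
 */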

/*
 * Each PTE has a Page address, some flags and a valid bit:
 * +---------------------+---+-------+-+
 * | Page address        |Rsv| Flags |V|
 * +---------------------+---+-------+-+
 *  31:12 - Page address (Pages always start on a 4 KB boundary)
 *  11: 9 - Reserved
 *   8: 1 - Flags
 *      8 - Read allocate - allocate cache space on read misses
 *      7 - Read cache - enable cache & prefetch of data
 *      6 - Write buffer - enable delaying writes on their way to memory
 *      5 - Write allocate - allocate cache space on write misses
 *      4 - Write cache - different writes can be merged together
 *      3 - Override cache attributes
 *          if 1, bits 4-8 control cache attributes
 *          if 0, the system bus defaults are used
 *      2 - Writable
 *      1 - Readable
 *      0 - 1 if Page @ Page address is valid
 */
#define RK_PTE_PAGE_ADDRESS_MASK  0xfffff000
#define RK_PTE_PAGE_FLAGS_MASK    0x000001fe
#define RK_PTE_PAGE_WRITABLE      BIT(2)
#define RK_PTE_PAGE_READABLE      BIT(1)
#define RK_PTE_PAGE_VALID         BIT(0)

static inline bool rk_pte_is_page_valid(u32 pte)
{
	return pte & RK_PTE_PAGE_VALID;
}

/* TODO: set cache flags per prot IOMMU_CACHE */
static u32 rk_mk_pte(phys_addr_t page, int prot)
{
	u32 flags = 0;
	flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0;
	flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;
	page &= RK_PTE_PAGE_ADDRESS_MASK;
	return page | flags | RK_PTE_PAGE_VALID;
}
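
/*
 * Worked example (hypothetical): rk_mk_pte(0x12345000, IOMMU_READ |
 * IOMMU_WRITE) returns 0x12345007, i.e. the page address plus the
 * readable (bit 1), writable (bit 2) and valid (bit 0) flags.
 */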

/*
 * In v2:
 * 31:12 - Page address bit 31:0
 * 11: 9 - Page address bit 34:32
 *  8: 4 - Page address bit 39:35
 *     3 - Security
 *     2 - Readable
 *     1 - Writable
 *     0 - 1 if Page @ Page address is valid
 */
#define RK_PTE_PAGE_READABLE_V2      BIT(2)
#define RK_PTE_PAGE_WRITABLE_V2      BIT(1)

static u32 rk_mk_pte_v2(phys_addr_t page, int prot)
{
	u32 flags = 0;

	flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE_V2 : 0;
	flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE_V2 : 0;

	return rk_mk_dte_v2(page) | flags;
}
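
/*
 * Worked example (hypothetical): rk_mk_pte_v2(0x7612345000, IOMMU_READ |
 * IOMMU_WRITE) packs the 40-bit address exactly as rk_mk_dte_v2() does
 * (0x12345671) and ORs in the v2 flags, giving 0x12345677.
 */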

static u32 rk_mk_pte_invalid(u32 pte)
{
	return pte & ~RK_PTE_PAGE_VALID;
}

/*
 * rk3288 iova (IOMMU Virtual Address) format
 *  31       22.21       12.11          0
 * +-----------+-----------+-------------+
 * | DTE index | PTE index | Page offset |
 * +-----------+-----------+-------------+
 *  31:22 - DTE index   - index of DTE in DT
 *  21:12 - PTE index   - index of PTE in PT @ DTE.pt_address
 *  11: 0 - Page offset - offset into page @ PTE.page_address
 */
#define RK_IOVA_DTE_MASK    0xffc00000
#define RK_IOVA_DTE_SHIFT   22
#define RK_IOVA_PTE_MASK    0x003ff000
#define RK_IOVA_PTE_SHIFT   12
#define RK_IOVA_PAGE_MASK   0x00000fff
#define RK_IOVA_PAGE_SHIFT  0

static u32 rk_iova_dte_index(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT;
}

static u32 rk_iova_pte_index(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT;
}

static u32 rk_iova_page_offset(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT;
}
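
/*
 * Worked example (hypothetical iova): 0x12345678 decomposes into DTE
 * index 0x048 (bits 31:22), PTE index 0x345 (bits 21:12) and page
 * offset 0x678 (bits 11:0).
 */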

static u32 rk_iommu_read(void __iomem *base, u32 offset)
{
	return readl(base + offset);
}

static void rk_iommu_write(void __iomem *base, u32 offset, u32 value)
{
	writel(value, base + offset);
}

static void rk_iommu_command(struct rk_iommu *iommu, u32 command)
{
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		writel(command, iommu->bases[i] + RK_MMU_COMMAND);
}

static void rk_iommu_base_command(void __iomem *base, u32 command)
{
	writel(command, base + RK_MMU_COMMAND);
}

static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova_start,
			       size_t size)
{
	int i;
	dma_addr_t iova_end = iova_start + size;
	/*
	 * TODO(djkurtz): Figure out when it is more efficient to shootdown the
	 * entire iotlb rather than iterate over individual iovas.
	 */
	for (i = 0; i < iommu->num_mmu; i++) {
		dma_addr_t iova;

		for (iova = iova_start; iova < iova_end; iova += SPAGE_SIZE)
			rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova);
	}
}

static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
{
	bool active = true;
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
					   RK_MMU_STATUS_STALL_ACTIVE);

	return active;
}

static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
{
	bool enable = true;
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		enable &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
					   RK_MMU_STATUS_PAGING_ENABLED);

	return enable;
}

static bool rk_iommu_is_reset_done(struct rk_iommu *iommu)
{
	bool done = true;
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		done &= rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR) == 0;

	return done;
}

static int rk_iommu_enable_stall(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (rk_iommu_is_stall_active(iommu))
		return 0;

	/* Stall can only be enabled if paging is enabled */
	if (!rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL);

	ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
				 val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_disable_stall(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (!rk_iommu_is_stall_active(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL);

	ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
				 !val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_enable_paging(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING);

	ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
				 val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_disable_paging(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (!rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING);

	ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
				 !val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_force_reset(struct rk_iommu *iommu)
{
	int ret, i;
	u32 dte_addr;
	bool val;

	if (iommu->reset_disabled)
		return 0;

	/*
	 * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY
	 * and verifying that upper 5 nybbles are read back.
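	 * For example, v1's pt_address() masks off the low 12 bits, so
	 * 0xCAFEB000 is written and must be read back unchanged.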
	 */
	for (i = 0; i < iommu->num_mmu; i++) {
		dte_addr = rk_ops->pt_address(DTE_ADDR_DUMMY);
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dte_addr);

		if (dte_addr != rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR)) {
			dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
			return -EFAULT;
		}
	}

	rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);

	ret = readx_poll_timeout(rk_iommu_is_reset_done, iommu, val,
				 val, RK_MMU_FORCE_RESET_TIMEOUT_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret) {
		dev_err(iommu->dev, "FORCE_RESET command timed out\n");
		return ret;
	}

	return 0;
}

static inline phys_addr_t rk_dte_addr_phys(u32 addr)
{
	return (phys_addr_t)addr;
}

static inline u32 rk_dma_addr_dte(dma_addr_t dt_dma)
{
	return dt_dma;
}

#define DT_HI_MASK GENMASK_ULL(39, 32)
#define DTE_BASE_HI_MASK GENMASK(11, 4)
#define DT_SHIFT 28

static inline phys_addr_t rk_dte_addr_phys_v2(u32 addr)
{
	u64 addr64 = addr;

	return (phys_addr_t)(addr64 & RK_DTE_PT_ADDRESS_MASK) |
	       ((addr64 & DTE_BASE_HI_MASK) << DT_SHIFT);
}

static inline u32 rk_dma_addr_dte_v2(dma_addr_t dt_dma)
{
	return (dt_dma & RK_DTE_PT_ADDRESS_MASK) |
	       ((dt_dma & DT_HI_MASK) >> DT_SHIFT);
}
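
/*
 * Worked example (hypothetical): a DT at dma address 0x7612345000 is
 * programmed as 0x12345760 (bits 31:12 kept in place, bits 39:32 packed
 * into bits 11:4); rk_dte_addr_phys_v2() undoes this when the register
 * is read back.
 */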

static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
{
	void __iomem *base = iommu->bases[index];
	u32 dte_index, pte_index, page_offset;
	u32 mmu_dte_addr;
	phys_addr_t mmu_dte_addr_phys, dte_addr_phys;
	u32 *dte_addr;
	u32 dte;
	phys_addr_t pte_addr_phys = 0;
	u32 *pte_addr = NULL;
	u32 pte = 0;
	phys_addr_t page_addr_phys = 0;
	u32 page_flags = 0;

	dte_index = rk_iova_dte_index(iova);
	pte_index = rk_iova_pte_index(iova);
	page_offset = rk_iova_page_offset(iova);

	mmu_dte_addr = rk_iommu_read(base, RK_MMU_DTE_ADDR);
	mmu_dte_addr_phys = rk_ops->dte_addr_phys(mmu_dte_addr);

	dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
	dte_addr = phys_to_virt(dte_addr_phys);
	dte = *dte_addr;

	if (!rk_dte_is_pt_valid(dte))
		goto print_it;

	pte_addr_phys = rk_ops->pt_address(dte) + (pte_index * 4);
	pte_addr = phys_to_virt(pte_addr_phys);
	pte = *pte_addr;

	if (!rk_pte_is_page_valid(pte))
		goto print_it;

	page_addr_phys = rk_ops->pt_address(pte) + page_offset;
	page_flags = pte & RK_PTE_PAGE_FLAGS_MASK;

print_it:
	dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n",
		&iova, dte_index, pte_index, page_offset);
	dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n",
		&mmu_dte_addr_phys, &dte_addr_phys, dte,
		rk_dte_is_pt_valid(dte), &pte_addr_phys, pte,
		rk_pte_is_page_valid(pte), &page_addr_phys, page_flags);
}

static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
{
	struct rk_iommu *iommu = dev_id;
	u32 status;
	u32 int_status;
	dma_addr_t iova;
	irqreturn_t ret = IRQ_NONE;
	int i, err;

	err = pm_runtime_get_if_in_use(iommu->dev);
	if (!err || WARN_ON_ONCE(err < 0))
		return ret;

	if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)))
		goto out;

	for (i = 0; i < iommu->num_mmu; i++) {
		int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);
		if (int_status == 0)
			continue;

		ret = IRQ_HANDLED;
		iova = rk_iommu_read(iommu->bases[i], RK_MMU_PAGE_FAULT_ADDR);

		if (int_status & RK_MMU_IRQ_PAGE_FAULT) {
			int flags;

			status = rk_iommu_read(iommu->bases[i], RK_MMU_STATUS);
			flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ?
					  IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

			dev_err(iommu->dev, "Page fault at %pad of type %s\n",
				&iova,
				(flags == IOMMU_FAULT_WRITE) ? "write" : "read");

			log_iova(iommu, i, iova);

			/*
			 * Report page fault to any installed handlers.
			 * Ignore the return code, though, since we always zap cache
			 * and clear the page fault anyway.
			 */
			if (iommu->domain)
				report_iommu_fault(iommu->domain, iommu->dev, iova,
						   flags);
			else
				dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");

			rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
			rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
		}

		if (int_status & RK_MMU_IRQ_BUS_ERROR)
			dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);

		if (int_status & ~RK_MMU_IRQ_MASK)
			dev_err(iommu->dev, "unexpected int_status: %#08x\n",
				int_status);

		rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status);
	}

	clk_bulk_disable(iommu->num_clocks, iommu->clocks);

out:
	pm_runtime_put(iommu->dev);
	return ret;
}

static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	phys_addr_t pt_phys, phys = 0;
	u32 dte, pte;
	u32 *page_table;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);

	dte = rk_domain->dt[rk_iova_dte_index(iova)];
	if (!rk_dte_is_pt_valid(dte))
		goto out;

	pt_phys = rk_ops->pt_address(dte);
	page_table = (u32 *)phys_to_virt(pt_phys);
	pte = page_table[rk_iova_pte_index(iova)];
	if (!rk_pte_is_page_valid(pte))
		goto out;

	phys = rk_ops->pt_address(pte) + rk_iova_page_offset(iova);
out:
	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	return phys;
}

static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
			      dma_addr_t iova, size_t size)
{
	struct list_head *pos;
	unsigned long flags;

	/* shootdown these iova from all iommus using this domain */
	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_for_each(pos, &rk_domain->iommus) {
		struct rk_iommu *iommu;
		int ret;

		iommu = list_entry(pos, struct rk_iommu, node);

		/* Only zap TLBs of IOMMUs that are powered on. */
		ret = pm_runtime_get_if_in_use(iommu->dev);
		if (WARN_ON_ONCE(ret < 0))
			continue;
		if (ret) {
			WARN_ON(clk_bulk_enable(iommu->num_clocks,
						iommu->clocks));
			rk_iommu_zap_lines(iommu, iova, size);
			clk_bulk_disable(iommu->num_clocks, iommu->clocks);
			pm_runtime_put(iommu->dev);
		}
	}
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
}

static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain,
					 dma_addr_t iova, size_t size)
{
	rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);
	if (size > SPAGE_SIZE)
		rk_iommu_zap_iova(rk_domain, iova + size - SPAGE_SIZE,
				  SPAGE_SIZE);
}

static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
				  dma_addr_t iova)
{
	u32 *page_table, *dte_addr;
	u32 dte_index, dte;
	phys_addr_t pt_phys;
	dma_addr_t pt_dma;

	assert_spin_locked(&rk_domain->dt_lock);

	dte_index = rk_iova_dte_index(iova);
	dte_addr = &rk_domain->dt[dte_index];
	dte = *dte_addr;
	if (rk_dte_is_pt_valid(dte))
		goto done;

	page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
	if (!page_table)
		return ERR_PTR(-ENOMEM);

	pt_dma = dma_map_single(dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, pt_dma)) {
		dev_err(dma_dev, "DMA mapping error while allocating page table\n");
		free_page((unsigned long)page_table);
		return ERR_PTR(-ENOMEM);
	}

	dte = rk_ops->mk_dtentries(pt_dma);
	*dte_addr = dte;

	rk_table_flush(rk_domain,
		       rk_domain->dt_dma + dte_index * sizeof(u32), 1);
done:
	pt_phys = rk_ops->pt_address(dte);
	return (u32 *)phys_to_virt(pt_phys);
}

static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain,
				  u32 *pte_addr, dma_addr_t pte_dma,
				  size_t size)
{
	unsigned int pte_count;
	unsigned int pte_total = size / SPAGE_SIZE;

	assert_spin_locked(&rk_domain->dt_lock);

	for (pte_count = 0; pte_count < pte_total; pte_count++) {
		u32 pte = pte_addr[pte_count];
		if (!rk_pte_is_page_valid(pte))
			break;

		pte_addr[pte_count] = rk_mk_pte_invalid(pte);
	}

	rk_table_flush(rk_domain, pte_dma, pte_count);

	return pte_count * SPAGE_SIZE;
}

static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
			     dma_addr_t pte_dma, dma_addr_t iova,
			     phys_addr_t paddr, size_t size, int prot)
{
	unsigned int pte_count;
	unsigned int pte_total = size / SPAGE_SIZE;
	phys_addr_t page_phys;

	assert_spin_locked(&rk_domain->dt_lock);

	for (pte_count = 0; pte_count < pte_total; pte_count++) {
		u32 pte = pte_addr[pte_count];

		if (rk_pte_is_page_valid(pte))
			goto unwind;

		pte_addr[pte_count] = rk_ops->mk_ptentries(paddr, prot);

		paddr += SPAGE_SIZE;
	}

	rk_table_flush(rk_domain, pte_dma, pte_total);

	/*
	 * Zap the first and last iova to evict from iotlb any previously
	 * mapped cachelines holding stale values for its dte and pte.
	 * We only zap the first and last iova, since only they could have
	 * dte or pte shared with an existing mapping.
	 */
	rk_iommu_zap_iova_first_last(rk_domain, iova, size);

	return 0;
unwind:
	/* Unmap the range of iovas that we just mapped */
	rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma,
			    pte_count * SPAGE_SIZE);

	iova += pte_count * SPAGE_SIZE;
	page_phys = rk_ops->pt_address(pte_addr[pte_count]);
	pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n",
	       &iova, &page_phys, &paddr, prot);

	return -EADDRINUSE;
}

static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
			phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
	u32 *page_table, *pte_addr;
	u32 dte_index, pte_index;
	int ret;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);

	/*
	 * pgsize_bitmap specifies iova sizes that fit in one page table
	 * (1024 4-KiB pages = 4 MiB).
	 * So, size will always be 4096 <= size <= 4194304.
	 * Since iommu_map() guarantees that both iova and size will be
	 * aligned, we will always only be mapping from a single dte here.
	 */
	page_table = rk_dte_get_page_table(rk_domain, iova);
	if (IS_ERR(page_table)) {
		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
		return PTR_ERR(page_table);
	}

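	/*
	 * Note: despite its name, dte_index holds the DTE value here;
	 * rk_ops->pt_address() below extracts the page table address
	 * from it.
	 */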
	dte_index = rk_domain->dt[rk_iova_dte_index(iova)];
	pte_index = rk_iova_pte_index(iova);
	pte_addr = &page_table[pte_index];

	pte_dma = rk_ops->pt_address(dte_index) + pte_index * sizeof(u32);
	ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova,
				paddr, size, prot);

	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	return ret;
}

static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
			     size_t size, struct iommu_iotlb_gather *gather)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
	phys_addr_t pt_phys;
	u32 dte;
	u32 *pte_addr;
	size_t unmap_size;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);

	/*
	 * pgsize_bitmap specifies iova sizes that fit in one page table
	 * (1024 4-KiB pages = 4 MiB).
	 * So, size will always be 4096 <= size <= 4194304.
	 * Since iommu_unmap() guarantees that both iova and size will be
	 * aligned, we will always only be unmapping from a single dte here.
	 */
	dte = rk_domain->dt[rk_iova_dte_index(iova)];
	/* Just return 0 if iova is unmapped */
	if (!rk_dte_is_pt_valid(dte)) {
		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
		return 0;
	}

	pt_phys = rk_ops->pt_address(dte);
	pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
	pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
	unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size);

	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	/* Shootdown iotlb entries for iova range that was just unmapped */
	rk_iommu_zap_iova(rk_domain, iova, unmap_size);

	return unmap_size;
}

static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
{
	struct rk_iommudata *data = dev_iommu_priv_get(dev);

	return data ? data->iommu : NULL;
}

/* Must be called with iommu powered on and attached */
static void rk_iommu_disable(struct rk_iommu *iommu)
{
	int i;

	/* Ignore error while disabling, just keep going */
	WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
	rk_iommu_enable_stall(iommu);
	rk_iommu_disable_paging(iommu);
	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
	}
	rk_iommu_disable_stall(iommu);
	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
}

/* Must be called with iommu powered on and attached */
static int rk_iommu_enable(struct rk_iommu *iommu)
{
	struct iommu_domain *domain = iommu->domain;
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	int ret, i;

	ret = clk_bulk_enable(iommu->num_clocks, iommu->clocks);
	if (ret)
		return ret;

	ret = rk_iommu_enable_stall(iommu);
	if (ret)
		goto out_disable_clocks;

	ret = rk_iommu_force_reset(iommu);
	if (ret)
		goto out_disable_stall;

	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
			       rk_ops->dma_addr_dte(rk_domain->dt_dma));
		rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
	}

	ret = rk_iommu_enable_paging(iommu);

out_disable_stall:
	rk_iommu_disable_stall(iommu);
out_disable_clocks:
	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
	return ret;
}

static void rk_iommu_detach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct rk_iommu *iommu;
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	int ret;

	/* Allow 'virtual devices' (e.g. drm) to detach from domain */
	iommu = rk_iommu_from_dev(dev);
	if (!iommu)
		return;

	dev_dbg(dev, "Detaching from iommu domain\n");

	/* iommu already detached */
	if (iommu->domain != domain)
		return;

	iommu->domain = NULL;

	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_del_init(&iommu->node);
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);

	ret = pm_runtime_get_if_in_use(iommu->dev);
	WARN_ON_ONCE(ret < 0);
	if (ret > 0) {
		rk_iommu_disable(iommu);
		pm_runtime_put(iommu->dev);
	}
}

static int rk_iommu_attach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	struct rk_iommu *iommu;
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	int ret;

	/*
	 * Allow 'virtual devices' (e.g., drm) to attach to domain.
	 * Such a device does not belong to an iommu group.
	 */
	iommu = rk_iommu_from_dev(dev);
	if (!iommu)
		return 0;

	dev_dbg(dev, "Attaching to iommu domain\n");

	/* iommu already attached */
	if (iommu->domain == domain)
		return 0;

	if (iommu->domain)
		rk_iommu_detach_device(iommu->domain, dev);

	iommu->domain = domain;

	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_add_tail(&iommu->node, &rk_domain->iommus);
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);

	ret = pm_runtime_get_if_in_use(iommu->dev);
	if (!ret || WARN_ON_ONCE(ret < 0))
		return 0;

	ret = rk_iommu_enable(iommu);
	if (ret)
		rk_iommu_detach_device(iommu->domain, dev);

	pm_runtime_put(iommu->dev);

	return ret;
}

static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
{
	struct rk_iommu_domain *rk_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;

	if (!dma_dev)
		return NULL;

	rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL);
	if (!rk_domain)
		return NULL;

	/*
	 * rk32xx iommus use a 2-level page table.
	 * Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
	 * Allocate one 4 KiB page for each table.
	 */
	rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
	if (!rk_domain->dt)
		goto err_free_domain;

	rk_domain->dt_dma = dma_map_single(dma_dev, rk_domain->dt,
					   SPAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, rk_domain->dt_dma)) {
		dev_err(dma_dev, "DMA map error for DT\n");
		goto err_free_dt;
	}

	spin_lock_init(&rk_domain->iommus_lock);
	spin_lock_init(&rk_domain->dt_lock);
	INIT_LIST_HEAD(&rk_domain->iommus);

	rk_domain->domain.geometry.aperture_start = 0;
	rk_domain->domain.geometry.aperture_end = DMA_BIT_MASK(32);
	rk_domain->domain.geometry.force_aperture = true;

	return &rk_domain->domain;

err_free_dt:
	free_page((unsigned long)rk_domain->dt);
err_free_domain:
	kfree(rk_domain);

	return NULL;
}

static void rk_iommu_domain_free(struct iommu_domain *domain)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	int i;

	WARN_ON(!list_empty(&rk_domain->iommus));

	for (i = 0; i < NUM_DT_ENTRIES; i++) {
		u32 dte = rk_domain->dt[i];
		if (rk_dte_is_pt_valid(dte)) {
			phys_addr_t pt_phys = rk_ops->pt_address(dte);
			u32 *page_table = phys_to_virt(pt_phys);
			dma_unmap_single(dma_dev, pt_phys,
					 SPAGE_SIZE, DMA_TO_DEVICE);
			free_page((unsigned long)page_table);
		}
	}

	dma_unmap_single(dma_dev, rk_domain->dt_dma,
			 SPAGE_SIZE, DMA_TO_DEVICE);
	free_page((unsigned long)rk_domain->dt);

	kfree(rk_domain);
}

static struct iommu_device *rk_iommu_probe_device(struct device *dev)
{
	struct rk_iommudata *data;
	struct rk_iommu *iommu;

	data = dev_iommu_priv_get(dev);
	if (!data)
		return ERR_PTR(-ENODEV);

	iommu = rk_iommu_from_dev(dev);

	data->link = device_link_add(dev, iommu->dev,
				     DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);

	return &iommu->iommu;
}

static void rk_iommu_release_device(struct device *dev)
{
	struct rk_iommudata *data = dev_iommu_priv_get(dev);

	device_link_del(data->link);
}

static struct iommu_group *rk_iommu_device_group(struct device *dev)
{
	struct rk_iommu *iommu;

	iommu = rk_iommu_from_dev(dev);

	return iommu_group_ref_get(iommu->group);
}

static int rk_iommu_of_xlate(struct device *dev,
			     struct of_phandle_args *args)
{
	struct platform_device *iommu_dev;
	struct rk_iommudata *data;

	data = devm_kzalloc(dma_dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	iommu_dev = of_find_device_by_node(args->np);

	data->iommu = platform_get_drvdata(iommu_dev);
	dev_iommu_priv_set(dev, data);

	platform_device_put(iommu_dev);

	return 0;
}

static const struct iommu_ops rk_iommu_ops = {
	.domain_alloc = rk_iommu_domain_alloc,
	.domain_free = rk_iommu_domain_free,
	.attach_dev = rk_iommu_attach_device,
	.detach_dev = rk_iommu_detach_device,
	.map = rk_iommu_map,
	.unmap = rk_iommu_unmap,
	.probe_device = rk_iommu_probe_device,
	.release_device = rk_iommu_release_device,
	.iova_to_phys = rk_iommu_iova_to_phys,
	.device_group = rk_iommu_device_group,
	.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
	.of_xlate = rk_iommu_of_xlate,
};

static int rk_iommu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rk_iommu *iommu;
	struct resource *res;
	const struct rk_iommu_ops *ops;
	int num_res = pdev->num_resources;
	int err, i;

	iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	platform_set_drvdata(pdev, iommu);
	iommu->dev = dev;
	iommu->num_mmu = 0;

	ops = of_device_get_match_data(dev);
	if (!rk_ops)
		rk_ops = ops;

	/*
	 * This should not happen unless different versions of the
	 * hardware block are embedded in the same SoC.
	 */
	if (WARN_ON(rk_ops != ops))
		return -EINVAL;

	iommu->bases = devm_kcalloc(dev, num_res, sizeof(*iommu->bases),
				    GFP_KERNEL);
	if (!iommu->bases)
		return -ENOMEM;

	for (i = 0; i < num_res; i++) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
		if (!res)
			continue;
		iommu->bases[i] = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(iommu->bases[i]))
			continue;
		iommu->num_mmu++;
	}
	if (iommu->num_mmu == 0)
		return PTR_ERR(iommu->bases[0]);

	iommu->num_irq = platform_irq_count(pdev);
	if (iommu->num_irq < 0)
		return iommu->num_irq;

	iommu->reset_disabled = device_property_read_bool(dev,
					"rockchip,disable-mmu-reset");

	iommu->num_clocks = ARRAY_SIZE(rk_iommu_clocks);
	iommu->clocks = devm_kcalloc(iommu->dev, iommu->num_clocks,
				     sizeof(*iommu->clocks), GFP_KERNEL);
	if (!iommu->clocks)
		return -ENOMEM;

	for (i = 0; i < iommu->num_clocks; ++i)
		iommu->clocks[i].id = rk_iommu_clocks[i];

	/*
	 * iommu clocks should be present for all new devices and devicetrees,
	 * but there are older devicetrees without clocks out in the wild.
	 * So treat clocks as optional for the time being.
	 */
	err = devm_clk_bulk_get(iommu->dev, iommu->num_clocks, iommu->clocks);
	if (err == -ENOENT)
		iommu->num_clocks = 0;
	else if (err)
		return err;

	err = clk_bulk_prepare(iommu->num_clocks, iommu->clocks);
	if (err)
		return err;

	iommu->group = iommu_group_alloc();
	if (IS_ERR(iommu->group)) {
		err = PTR_ERR(iommu->group);
		goto err_unprepare_clocks;
	}

	err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev));
	if (err)
		goto err_put_group;

	err = iommu_device_register(&iommu->iommu, &rk_iommu_ops, dev);
	if (err)
		goto err_remove_sysfs;

	/*
	 * Use the first registered IOMMU device for domain to use with DMA
	 * API, since a domain might not physically correspond to a single
	 * IOMMU device.
	 */
	if (!dma_dev)
		dma_dev = &pdev->dev;

	bus_set_iommu(&platform_bus_type, &rk_iommu_ops);

	pm_runtime_enable(dev);

	for (i = 0; i < iommu->num_irq; i++) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0)
			return irq;

		err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
				       IRQF_SHARED, dev_name(dev), iommu);
		if (err) {
			pm_runtime_disable(dev);
			goto err_remove_sysfs;
		}
	}

	dma_set_mask_and_coherent(dev, rk_ops->dma_bit_mask);

	return 0;
err_remove_sysfs:
	iommu_device_sysfs_remove(&iommu->iommu);
err_put_group:
	iommu_group_put(iommu->group);
err_unprepare_clocks:
	clk_bulk_unprepare(iommu->num_clocks, iommu->clocks);
	return err;
}

static void rk_iommu_shutdown(struct platform_device *pdev)
{
	struct rk_iommu *iommu = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < iommu->num_irq; i++) {
		int irq = platform_get_irq(pdev, i);

		devm_free_irq(iommu->dev, irq, iommu);
	}

	pm_runtime_force_suspend(&pdev->dev);
}

static int __maybe_unused rk_iommu_suspend(struct device *dev)
{
	struct rk_iommu *iommu = dev_get_drvdata(dev);

	if (!iommu->domain)
		return 0;

	rk_iommu_disable(iommu);
	return 0;
}

static int __maybe_unused rk_iommu_resume(struct device *dev)
{
	struct rk_iommu *iommu = dev_get_drvdata(dev);

	if (!iommu->domain)
		return 0;

	return rk_iommu_enable(iommu);
}

static const struct dev_pm_ops rk_iommu_pm_ops = {
	SET_RUNTIME_PM_OPS(rk_iommu_suspend, rk_iommu_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

static struct rk_iommu_ops iommu_data_ops_v1 = {
	.pt_address = &rk_dte_pt_address,
	.mk_dtentries = &rk_mk_dte,
	.mk_ptentries = &rk_mk_pte,
	.dte_addr_phys = &rk_dte_addr_phys,
	.dma_addr_dte = &rk_dma_addr_dte,
	.dma_bit_mask = DMA_BIT_MASK(32),
};

static struct rk_iommu_ops iommu_data_ops_v2 = {
	.pt_address = &rk_dte_pt_address_v2,
	.mk_dtentries = &rk_mk_dte_v2,
	.mk_ptentries = &rk_mk_pte_v2,
	.dte_addr_phys = &rk_dte_addr_phys_v2,
	.dma_addr_dte = &rk_dma_addr_dte_v2,
	.dma_bit_mask = DMA_BIT_MASK(40),
};

static const struct of_device_id rk_iommu_dt_ids[] = {
	{ .compatible = "rockchip,iommu",
	  .data = &iommu_data_ops_v1,
	},
	{ .compatible = "rockchip,rk3568-iommu",
	  .data = &iommu_data_ops_v2,
	},
	{ /* sentinel */ }
};

static struct platform_driver rk_iommu_driver = {
	.probe = rk_iommu_probe,
	.shutdown = rk_iommu_shutdown,
	.driver = {
		   .name = "rk_iommu",
		   .of_match_table = rk_iommu_dt_ids,
		   .pm = &rk_iommu_pm_ops,
		   .suppress_bind_attrs = true,
	},
};

static int __init rk_iommu_init(void)
{
	return platform_driver_register(&rk_iommu_driver);
}
subsys_initcall(rk_iommu_init);