// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup, new allocation schemes, virtual merging:
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 */


#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/iommu-helper.h>
#include <linux/crash_dump.h>
#include <linux/hash.h>
#include <linux/fault-inject.h>
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>
#include <asm/fadump.h>
#include <asm/vio.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

#define DBG(...)

static int novmerge;

static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);

static int __init setup_iommu(char *str)
{
	if (!strcmp(str, "novmerge"))
		novmerge = 1;
	else if (!strcmp(str, "vmerge"))
		novmerge = 0;
	return 1;
}

__setup("iommu=", setup_iommu);
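
/*
 * Example: booting with "iommu=novmerge" disables virtual merging of
 * scatterlist entries, while "iommu=vmerge" re-enables it. Merging is
 * the default, since novmerge is zero-initialised.
 */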

static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);

/*
 * We precalculate the hash to avoid doing it on every allocation.
 *
 * The hash is important to spread CPUs evenly across all the pools. For
 * example, on a POWER7 with 4-way SMT the primary threads are the CPU ids
 * that are multiples of 4; interrupts are typically directed at primary
 * threads, so with 4 pools a plain "cpu % nr_pools" would map every
 * primary thread to the same pool.
 */
static int __init setup_iommu_pool_hash(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);

	return 0;
}
subsys_initcall(setup_iommu_pool_hash);
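
/*
 * The hash is consumed in iommu_range_alloc() below, which selects a pool
 * with "raw_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1)".
 */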

#ifdef CONFIG_FAIL_IOMMU

static DECLARE_FAULT_ATTR(fail_iommu);

static int __init setup_fail_iommu(char *str)
{
	return setup_fault_attr(&fail_iommu, str);
}
__setup("fail_iommu=", setup_fail_iommu);
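
/*
 * fail_iommu= takes the standard fault-attr boot syntax, i.e.
 * "fail_iommu=<interval>,<probability>,<space>,<times>" (see
 * Documentation/fault-injection/). Note that should_fail_iommu() below
 * additionally requires the per-device fail_iommu flag to be set.
 */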

static bool should_fail_iommu(struct device *dev)
{
	return dev->archdata.fail_iommu && should_fail(&fail_iommu, 1);
}

static int __init fail_iommu_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_iommu",
						       NULL, &fail_iommu);

	return PTR_ERR_OR_ZERO(dir);
}
late_initcall(fail_iommu_debugfs);

static ssize_t fail_iommu_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", dev->archdata.fail_iommu);
}

static ssize_t fail_iommu_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	int i;

	if (count > 0 && sscanf(buf, "%d", &i) > 0)
		dev->archdata.fail_iommu = (i == 0) ? 0 : 1;

	return count;
}

static DEVICE_ATTR_RW(fail_iommu);
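
/*
 * The attribute shows up on each PCI/VIO device once the bus notifier
 * below runs, so injection can be enabled per device at runtime, e.g.:
 *	echo 1 > /sys/bus/pci/devices/<device>/fail_iommu
 */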

static int fail_iommu_bus_notify(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct device *dev = data;

	if (action == BUS_NOTIFY_ADD_DEVICE) {
		if (device_create_file(dev, &dev_attr_fail_iommu))
			pr_warn("Unable to create IOMMU fault injection sysfs entries\n");
	} else if (action == BUS_NOTIFY_DEL_DEVICE) {
		device_remove_file(dev, &dev_attr_fail_iommu);
	}

	return 0;
}

static struct notifier_block fail_iommu_bus_notifier = {
	.notifier_call = fail_iommu_bus_notify
};

static int __init fail_iommu_setup(void)
{
#ifdef CONFIG_PCI
	bus_register_notifier(&pci_bus_type, &fail_iommu_bus_notifier);
#endif
#ifdef CONFIG_IBMVIO
	bus_register_notifier(&vio_bus_type, &fail_iommu_bus_notifier);
#endif

	return 0;
}
/*
 * Must execute after the PCI and VIO subsystems have initialised but
 * before devices are probed.
 */
arch_initcall(fail_iommu_setup);
#else
static inline bool should_fail_iommu(struct device *dev)
{
	return false;
}
#endif

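/*
 * Allocate a range of IOMMU pages from the table's bitmap. Requests of
 * more than 15 pages come from the dedicated large pool; smaller ones
 * come from the pool picked by the per-CPU hash. On failure the search
 * is retried from the start of the same pool (pass 1), then across each
 * of the other pools, before giving up with DMA_MAPPING_ERROR.
 */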
static unsigned long iommu_range_alloc(struct device *dev,
				       struct iommu_table *tbl,
				       unsigned long npages,
				       unsigned long *handle,
				       unsigned long mask,
				       unsigned int align_order)
{
	unsigned long n, end, start;
	unsigned long limit;
	int largealloc = npages > 15;
	int pass = 0;
	unsigned long align_mask;
	unsigned long boundary_size;
	unsigned long flags;
	unsigned int pool_nr;
	struct iommu_pool *pool;

	align_mask = (1ull << align_order) - 1;

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_MAPPING_ERROR;
	}

	if (should_fail_iommu(dev))
		return DMA_MAPPING_ERROR;

	/*
	 * We don't need to disable preemption here because any CPU can
	 * safely use any IOMMU pool.
	 */
	pool_nr = raw_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1);

	if (largealloc)
		pool = &(tbl->large_pool);
	else
		pool = &(tbl->pools[pool_nr]);

	spin_lock_irqsave(&(pool->lock), flags);

again:
	if ((pass == 0) && handle && *handle &&
	    (*handle >= pool->start) && (*handle < pool->end))
		start = *handle;
	else
		start = pool->hint;

	limit = pool->end;

	/* The case below can happen if we have a small segment appended
	 * to a large one, or when the previous alloc was at the very end
	 * of the available space. If so, go back to the initial start.
	 */
	if (start >= limit)
		start = pool->start;

	if (limit + tbl->it_offset > mask) {
		limit = mask - tbl->it_offset + 1;
		/* If we're constrained on address range, first try
		 * at the masked hint to avoid O(n) search complexity,
		 * but on second pass, start at 0 in pool 0.
		 */
		if ((start & mask) >= limit || pass > 0) {
			spin_unlock(&(pool->lock));
			pool = &(tbl->pools[0]);
			spin_lock(&(pool->lock));
			start = pool->start;
		} else {
			start &= mask;
		}
	}

	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << tbl->it_page_shift);
	else
		boundary_size = ALIGN(1UL << 32, 1 << tbl->it_page_shift);
	/* 4GB boundary for iseries_hv_alloc and iseries_hv_map */

	n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,
			     boundary_size >> tbl->it_page_shift, align_mask);
	if (n == -1) {
		if (likely(pass == 0)) {
			/* First try the pool from the start */
			pool->hint = pool->start;
			pass++;
			goto again;

		} else if (pass <= tbl->nr_pools) {
			/* Now try scanning all the other pools */
			spin_unlock(&(pool->lock));
			pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1);
			pool = &tbl->pools[pool_nr];
			spin_lock(&(pool->lock));
			pool->hint = pool->start;
			pass++;
			goto again;

		} else {
			/* Give up */
			spin_unlock_irqrestore(&(pool->lock), flags);
			return DMA_MAPPING_ERROR;
		}
	}

	end = n + npages;

	/* Bump the hint to a new block for small allocs. */
	if (largealloc) {
		/* Don't bump to new block to avoid fragmentation */
		pool->hint = end;
	} else {
		/* Overflow will be taken care of at the next allocation */
		pool->hint = (end + tbl->it_blocksize - 1) &
			     ~(tbl->it_blocksize - 1);
	}

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	spin_unlock_irqrestore(&(pool->lock), flags);

	return n;
}

static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
			      void *page, unsigned int npages,
			      enum dma_data_direction direction,
			      unsigned long mask, unsigned int align_order,
			      unsigned long attrs)
{
	unsigned long entry;
	dma_addr_t ret = DMA_MAPPING_ERROR;
	int build_fail;

	entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);

	if (unlikely(entry == DMA_MAPPING_ERROR))
		return DMA_MAPPING_ERROR;

	entry += tbl->it_offset;	/* Offset into real TCE table */
	ret = entry << tbl->it_page_shift;	/* Set the return dma address */
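	/*
	 * E.g. with 4K IOMMU pages (it_page_shift == 12), entry 0x100 maps
	 * to DMA address 0x100000; callers such as iommu_map_page() OR the
	 * intra-page offset back into the returned address.
	 */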

	/* Put the TCEs in the HW table */
	build_fail = tbl->it_ops->set(tbl, entry, npages,
				      (unsigned long)page &
				      IOMMU_PAGE_MASK(tbl), direction, attrs);

	/* tbl->it_ops->set() only returns non-zero for transient errors.
	 * Clean up the table bitmap in this case and return
	 * DMA_MAPPING_ERROR. For all other errors the functionality is
	 * not altered.
	 */
	if (unlikely(build_fail)) {
		__iommu_free(tbl, ret, npages);
		return DMA_MAPPING_ERROR;
	}

	/* Flush/invalidate TLB caches if necessary */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);

	/* Make sure updates are seen by hardware */
	mb();

	return ret;
}

static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
			     unsigned int npages)
{
	unsigned long entry, free_entry;

	entry = dma_addr >> tbl->it_page_shift;
	free_entry = entry - tbl->it_offset;

	if (((free_entry + npages) > tbl->it_size) ||
	    (entry < tbl->it_offset)) {
		if (printk_ratelimit()) {
			printk(KERN_INFO "iommu_free: invalid entry\n");
			printk(KERN_INFO "\tentry     = 0x%lx\n", entry);
			printk(KERN_INFO "\tdma_addr  = 0x%llx\n", (u64)dma_addr);
			printk(KERN_INFO "\tTable     = 0x%llx\n", (u64)tbl);
			printk(KERN_INFO "\tbus#      = 0x%llx\n", (u64)tbl->it_busno);
			printk(KERN_INFO "\tsize      = 0x%llx\n", (u64)tbl->it_size);
			printk(KERN_INFO "\tstartOff  = 0x%llx\n", (u64)tbl->it_offset);
			printk(KERN_INFO "\tindex     = 0x%llx\n", (u64)tbl->it_index);
			WARN_ON(1);
		}

		return false;
	}

	return true;
}

static struct iommu_pool *get_pool(struct iommu_table *tbl,
				   unsigned long entry)
{
	struct iommu_pool *p;
	unsigned long largepool_start = tbl->large_pool.start;

	/* The large pool is the last pool at the top of the table */
	if (entry >= largepool_start) {
		p = &tbl->large_pool;
	} else {
		unsigned int pool_nr = entry / tbl->poolsize;

		BUG_ON(pool_nr > tbl->nr_pools);
		p = &tbl->pools[pool_nr];
	}

	return p;
}

static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
			 unsigned int npages)
{
	unsigned long entry, free_entry;
	unsigned long flags;
	struct iommu_pool *pool;

	entry = dma_addr >> tbl->it_page_shift;
	free_entry = entry - tbl->it_offset;

	pool = get_pool(tbl, free_entry);

	if (!iommu_free_check(tbl, dma_addr, npages))
		return;

	tbl->it_ops->clear(tbl, entry, npages);

	spin_lock_irqsave(&(pool->lock), flags);
	bitmap_clear(tbl->it_map, free_entry, npages);
	spin_unlock_irqrestore(&(pool->lock), flags);
}

static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
		       unsigned int npages)
{
	__iommu_free(tbl, dma_addr, npages);

	/* Make sure TLB cache is flushed if the HW needs it. We do
	 * not do an mb() here on purpose, it is not needed on any of
	 * the current platforms.
	 */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);
}

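/*
 * Map a scatterlist for DMA. Each segment gets its own IOMMU allocation;
 * when the resulting DMA addresses come out contiguous (and novmerge is
 * not set, and max_seg_size would not be exceeded), consecutive segments
 * are virtually merged into a single output entry.
 */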
int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
		     struct scatterlist *sglist, int nelems,
		     unsigned long mask, enum dma_data_direction direction,
		     unsigned long attrs)
{
	dma_addr_t dma_next = 0, dma_addr;
	struct scatterlist *s, *outs, *segstart;
	int outcount, incount, i, build_fail = 0;
	unsigned int align;
	unsigned long handle;
	unsigned int max_seg_size;

	BUG_ON(direction == DMA_NONE);

	if ((nelems == 0) || !tbl)
		return 0;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("sg mapping %d elements:\n", nelems);

	max_seg_size = dma_get_max_seg_size(dev);
	for_each_sg(sglist, s, nelems, i) {
		unsigned long vaddr, npages, entry, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long) sg_virt(s);
		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl));
		align = 0;
		if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE &&
		    (vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - tbl->it_page_shift;
		entry = iommu_range_alloc(dev, tbl, npages, &handle,
					  mask >> tbl->it_page_shift, align);

		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

		/* Handle failure */
		if (unlikely(entry == DMA_MAPPING_ERROR)) {
			if (!(attrs & DMA_ATTR_NO_WARN) &&
			    printk_ratelimit())
				dev_info(dev, "iommu_alloc failed, tbl %p vaddr %lx npages %lu\n",
					 tbl, vaddr, npages);
			goto failure;
		}

		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << tbl->it_page_shift;
		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK(tbl));

		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
			    npages, entry, dma_addr);

		/* Insert into HW table */
		build_fail = tbl->it_ops->set(tbl, entry, npages,
					      vaddr & IOMMU_PAGE_MASK(tbl),
					      direction, attrs);
		if (unlikely(build_fail))
			goto failure;

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			DBG("  - trying merge...\n");
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if (novmerge || (dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
				DBG("    can't merge, new segment.\n");
			} else {
				outs->dma_length += s->length;
				DBG("    merged, new len: %ux\n", outs->dma_length);
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			DBG("  - filling new segment.\n");
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;

		DBG("  - dma next is: %lx\n", dma_next);
	}

	/* Flush/invalidate TLB caches if necessary */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);

	DBG("mapped %d elements:\n", outcount);

	/* For the sake of ppc_iommu_unmap_sg, we clear out the length in the
	 * next entry of the sglist if we didn't fill the list completely
	 */
	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_MAPPING_ERROR;
		outs->dma_length = 0;
	}

	/* Make sure updates are seen by hardware */
	mb();

	return outcount;

 failure:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl);
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IOMMU_PAGE_SIZE(tbl));
			__iommu_free(tbl, vaddr, npages);
			s->dma_address = DMA_MAPPING_ERROR;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	return 0;
}

void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
			int nelems, enum dma_data_direction direction,
			unsigned long attrs)
{
	struct scatterlist *sg;

	BUG_ON(direction == DMA_NONE);

	if (!tbl)
		return;

	sg = sglist;
	while (nelems--) {
		unsigned int npages;
		dma_addr_t dma_handle = sg->dma_address;

		if (sg->dma_length == 0)
			break;
		npages = iommu_num_pages(dma_handle, sg->dma_length,
					 IOMMU_PAGE_SIZE(tbl));
		__iommu_free(tbl, dma_handle, npages);
		sg = sg_next(sg);
	}

	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
	 * do not do an mb() here, the affected platforms do not need it
	 * when freeing.
	 */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);
}

static void iommu_table_clear(struct iommu_table *tbl)
{
	/*
	 * In the case of firmware assisted dump, the system goes through
	 * a clean reboot at the time of the crash, so it is safe to clear
	 * the TCE entries if firmware assisted dump is active.
	 */
	if (!is_kdump_kernel() || is_fadump_active()) {
		/* Clear the table in case firmware left allocations in it */
		tbl->it_ops->clear(tbl, tbl->it_offset, tbl->it_size);
		return;
	}

#ifdef CONFIG_CRASH_DUMP
	if (tbl->it_ops->get) {
		unsigned long index, tceval, tcecount = 0;

		/* Reserve the existing mappings left by the first kernel. */
		for (index = 0; index < tbl->it_size; index++) {
			tceval = tbl->it_ops->get(tbl, index + tbl->it_offset);
			/*
			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
			 */
			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
				__set_bit(index, tbl->it_map);
				tcecount++;
			}
		}

		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
			pr_warn("TCE table is full; freeing %d entries for the kdump boot\n",
				KDUMP_MIN_TCE_ENTRIES);
			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
				index < tbl->it_size; index++)
				__clear_bit(index, tbl->it_map);
		}
	}
#endif
}

static void iommu_table_reserve_pages(struct iommu_table *tbl,
		unsigned long res_start, unsigned long res_end)
{
	int i;

	WARN_ON_ONCE(res_end < res_start);
	/*
	 * Reserve page 0 so it will not be used for any mappings.
	 * This prevents buggy drivers that consider page 0 to be invalid
	 * from crashing the machine or even losing data.
	 */
	if (tbl->it_offset == 0)
		set_bit(0, tbl->it_map);

	tbl->it_reserved_start = res_start;
	tbl->it_reserved_end = res_end;

	/* Check if res_start..res_end isn't empty and overlaps the table */
	if (res_start && res_end &&
			(tbl->it_offset + tbl->it_size < res_start ||
			 res_end < tbl->it_offset))
		return;

	for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i)
		set_bit(i - tbl->it_offset, tbl->it_map);
}

static void iommu_table_release_pages(struct iommu_table *tbl)
{
	int i;

	/*
	 * In case we have reserved the first bit, we should not emit
	 * the warning below.
	 */
	if (tbl->it_offset == 0)
		clear_bit(0, tbl->it_map);

	for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i)
		clear_bit(i - tbl->it_offset, tbl->it_map);
}

/*
 * Build an iommu_table structure.  This contains a bit map which
 * is used to manage allocation of the tce space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid,
		unsigned long res_start, unsigned long res_end)
{
	unsigned long sz;
	static int welcomed = 0;
	struct page *page;
	unsigned int i;
	struct iommu_pool *p;

	BUG_ON(!tbl->it_ops);

	/* number of bytes needed for the bitmap */
	sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);

	page = alloc_pages_node(nid, GFP_KERNEL, get_order(sz));
	if (!page)
		panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
	tbl->it_map = page_address(page);
	memset(tbl->it_map, 0, sz);

	iommu_table_reserve_pages(tbl, res_start, res_end);

	/* We only split the IOMMU table if we have 1GB or more of space */
	if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024))
		tbl->nr_pools = IOMMU_NR_POOLS;
	else
		tbl->nr_pools = 1;

	/* We reserve the top 1/4 of the table for large allocations */
	tbl->poolsize = (tbl->it_size * 3 / 4) / tbl->nr_pools;
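	/*
	 * Worked example (assuming IOMMU_NR_POOLS is 4): a table of
	 * 0x80000 entries with 4K pages spans 2GB, so it is split;
	 * poolsize = (0x80000 * 3/4) / 4 = 0x18000 entries per pool, the
	 * four pools cover entries 0 - 0x5ffff, and the large pool gets
	 * the top quarter, 0x60000 - 0x7ffff.
	 */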

	for (i = 0; i < tbl->nr_pools; i++) {
		p = &tbl->pools[i];
		spin_lock_init(&(p->lock));
		p->start = tbl->poolsize * i;
		p->hint = p->start;
		p->end = p->start + tbl->poolsize;
	}

	p = &tbl->large_pool;
	spin_lock_init(&(p->lock));
	p->start = tbl->poolsize * i;
	p->hint = p->start;
	p->end = tbl->it_size;

	iommu_table_clear(tbl);

	if (!welcomed) {
		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
		       novmerge ? "disabled" : "enabled");
		welcomed = 1;
	}

	return tbl;
}

static void iommu_table_free(struct kref *kref)
{
	unsigned long bitmap_sz;
	unsigned int order;
	struct iommu_table *tbl;

	tbl = container_of(kref, struct iommu_table, it_kref);

	if (tbl->it_ops->free)
		tbl->it_ops->free(tbl);

	if (!tbl->it_map) {
		kfree(tbl);
		return;
	}

	iommu_table_release_pages(tbl);

	/* verify that table contains no entries */
	if (!bitmap_empty(tbl->it_map, tbl->it_size))
		pr_warn("%s: Unexpected TCEs\n", __func__);

	/* calculate bitmap size in bytes */
	bitmap_sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);

	/* free bitmap */
	order = get_order(bitmap_sz);
	free_pages((unsigned long) tbl->it_map, order);

	/* free table */
	kfree(tbl);
}

struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl)
{
	if (kref_get_unless_zero(&tbl->it_kref))
		return tbl;

	return NULL;
}
EXPORT_SYMBOL_GPL(iommu_tce_table_get);

int iommu_tce_table_put(struct iommu_table *tbl)
{
	if (WARN_ON(!tbl))
		return 0;

	return kref_put(&tbl->it_kref, iommu_table_free);
}
EXPORT_SYMBOL_GPL(iommu_tce_table_put);
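
/*
 * Tables are reference counted through it_kref (typically initialised by
 * the platform code that allocates the table); iommu_table_free() runs
 * once iommu_tce_table_put() drops the last reference.
 */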

/* Creates TCEs for a user provided buffer.  The user buffer must be
 * contiguous real kernel storage (not vmalloc).  The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
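/*
 * For example, mapping offset 0x42 of a page yields a dma_addr_t whose
 * low bits are 0x42: iommu_alloc() returns an IOMMU-page-aligned DMA
 * address and the intra-page offset is OR'ed back in below.
 */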
dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
			  struct page *page, unsigned long offset, size_t size,
			  unsigned long mask, enum dma_data_direction direction,
			  unsigned long attrs)
{
	dma_addr_t dma_handle = DMA_MAPPING_ERROR;
	void *vaddr;
	unsigned long uaddr;
	unsigned int npages, align;

	BUG_ON(direction == DMA_NONE);

	vaddr = page_address(page) + offset;
	uaddr = (unsigned long)vaddr;

	if (tbl) {
		npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl));
		align = 0;
		if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE &&
		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - tbl->it_page_shift;

		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
					 mask >> tbl->it_page_shift, align,
					 attrs);
		if (dma_handle == DMA_MAPPING_ERROR) {
			if (!(attrs & DMA_ATTR_NO_WARN) &&
			    printk_ratelimit())
				dev_info(dev, "iommu_alloc failed, tbl %p vaddr %p npages %d\n",
					 tbl, vaddr, npages);
		} else
			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK(tbl));
	}

	return dma_handle;
}

void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
		      size_t size, enum dma_data_direction direction,
		      unsigned long attrs)
{
	unsigned int npages;

	BUG_ON(direction == DMA_NONE);

	if (tbl) {
		npages = iommu_num_pages(dma_handle, size,
					 IOMMU_PAGE_SIZE(tbl));
		iommu_free(tbl, dma_handle, npages);
	}
}

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
			   size_t size, dma_addr_t *dma_handle,
			   unsigned long mask, gfp_t flag, int node)
{
	void *ret = NULL;
	dma_addr_t mapping;
	unsigned int order;
	unsigned int nio_pages, io_order;
	struct page *page;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	/*
	 * Client asked for way too much space.  This is checked later
	 * anyway.  It is easier to debug here for the drivers than in
	 * the tce tables.
	 */
	if (order >= IOMAP_MAX_ORDER) {
		dev_info(dev, "iommu_alloc_consistent size too large: 0x%lx\n",
			 size);
		return NULL;
	}

	if (!tbl)
		return NULL;

	/* Alloc enough pages (and possibly more) */
	page = alloc_pages_node(node, flag, order);
	if (!page)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);

	/* Set up tces to cover the allocated range */
	nio_pages = size >> tbl->it_page_shift;
	io_order = get_iommu_order(size, tbl);
	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
			      mask >> tbl->it_page_shift, io_order, 0);
	if (mapping == DMA_MAPPING_ERROR) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}
	*dma_handle = mapping;
	return ret;
}

void iommu_free_coherent(struct iommu_table *tbl, size_t size,
			 void *vaddr, dma_addr_t dma_handle)
{
	if (tbl) {
		unsigned int nio_pages;

		size = PAGE_ALIGN(size);
		nio_pages = size >> tbl->it_page_shift;
		iommu_free(tbl, dma_handle, nio_pages);
		free_pages((unsigned long)vaddr, get_order(size));
	}
}

unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return TCE_PCI_READ | TCE_PCI_WRITE;
	case DMA_FROM_DEVICE:
		return TCE_PCI_WRITE;
	case DMA_TO_DEVICE:
		return TCE_PCI_READ;
	default:
		return 0;
	}
}
EXPORT_SYMBOL_GPL(iommu_direction_to_tce_perm);

#ifdef CONFIG_IOMMU_API
/*
 * SPAPR TCE API
 */
static void group_release(void *iommu_data)
{
	struct iommu_table_group *table_group = iommu_data;

	table_group->group = NULL;
}

void iommu_register_group(struct iommu_table_group *table_group,
		int pci_domain_number, unsigned long pe_num)
{
	struct iommu_group *grp;
	char *name;

	grp = iommu_group_alloc();
	if (IS_ERR(grp)) {
		pr_warn("powerpc iommu api: cannot create new group, err=%ld\n",
				PTR_ERR(grp));
		return;
	}
	table_group->group = grp;
	iommu_group_set_iommudata(grp, table_group, group_release);
	name = kasprintf(GFP_KERNEL, "domain%d-pe%lx",
			pci_domain_number, pe_num);
	if (!name)
		return;
	iommu_group_set_name(grp, name);
	kfree(name);
}

enum dma_data_direction iommu_tce_direction(unsigned long tce)
{
	if ((tce & TCE_PCI_READ) && (tce & TCE_PCI_WRITE))
		return DMA_BIDIRECTIONAL;
	else if (tce & TCE_PCI_READ)
		return DMA_TO_DEVICE;
	else if (tce & TCE_PCI_WRITE)
		return DMA_FROM_DEVICE;
	else
		return DMA_NONE;
}
EXPORT_SYMBOL_GPL(iommu_tce_direction);

void iommu_flush_tce(struct iommu_table *tbl)
{
	/* Flush/invalidate TLB caches if necessary */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);

	/* Make sure updates are seen by hardware */
	mb();
}
EXPORT_SYMBOL_GPL(iommu_flush_tce);

int iommu_tce_check_ioba(unsigned long page_shift,
		unsigned long offset, unsigned long size,
		unsigned long ioba, unsigned long npages)
{
	unsigned long mask = (1UL << page_shift) - 1;

	if (ioba & mask)
		return -EINVAL;

	ioba >>= page_shift;
	if (ioba < offset)
		return -EINVAL;

	if ((ioba + 1) > (offset + size))
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_check_ioba);

int iommu_tce_check_gpa(unsigned long page_shift, unsigned long gpa)
{
	unsigned long mask = (1UL << page_shift) - 1;

	if (gpa & mask)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_check_gpa);
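
/*
 * E.g. with 64K IOMMU pages (page_shift == 16) the alignment mask is
 * 0xffff, so an ioba of 0x10000 passes the check above while 0x1000
 * fails it with -EINVAL.
 */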

long iommu_tce_xchg_no_kill(struct mm_struct *mm,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long *hpa,
		enum dma_data_direction *direction)
{
	long ret;
	unsigned long size = 0;

	ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction, false);
	if (!ret && ((*direction == DMA_FROM_DEVICE) ||
			(*direction == DMA_BIDIRECTIONAL)) &&
			!mm_iommu_is_devmem(mm, *hpa, tbl->it_page_shift,
					&size))
		SetPageDirty(pfn_to_page(*hpa >> PAGE_SHIFT));

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_tce_xchg_no_kill);

void iommu_tce_kill(struct iommu_table *tbl,
		unsigned long entry, unsigned long pages)
{
	if (tbl->it_ops->tce_kill)
		tbl->it_ops->tce_kill(tbl, entry, pages, false);
}
EXPORT_SYMBOL_GPL(iommu_tce_kill);

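/*
 * While user space (VFIO) owns the table, the kernel's DMA API must not
 * hand out entries from it: iommu_take_ownership() checks that the map
 * is empty and then fills it with 0xff so every entry appears in use;
 * iommu_release_ownership() zeroes it again and re-reserves page 0 and
 * the it_reserved_start..it_reserved_end region.
 */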
int iommu_take_ownership(struct iommu_table *tbl)
{
	unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
	int ret = 0;

	/*
	 * VFIO does not control TCE entries allocation and the guest
	 * can write new TCEs on top of existing ones so iommu_tce_build()
	 * must be able to release old pages. This functionality
	 * requires the xchg_no_kill() callback to be defined, so if it
	 * is not implemented, we disallow taking ownership over the table.
	 */
	if (!tbl->it_ops->xchg_no_kill)
		return -EINVAL;

	spin_lock_irqsave(&tbl->large_pool.lock, flags);
	for (i = 0; i < tbl->nr_pools; i++)
		spin_lock(&tbl->pools[i].lock);

	iommu_table_release_pages(tbl);

	if (!bitmap_empty(tbl->it_map, tbl->it_size)) {
		pr_err("iommu_tce: it_map is not empty");
		ret = -EBUSY;
		/* Undo iommu_table_release_pages, i.e. restore bit#0, etc */
		iommu_table_reserve_pages(tbl, tbl->it_reserved_start,
				tbl->it_reserved_end);
	} else {
		memset(tbl->it_map, 0xff, sz);
	}

	for (i = 0; i < tbl->nr_pools; i++)
		spin_unlock(&tbl->pools[i].lock);
	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_take_ownership);

void iommu_release_ownership(struct iommu_table *tbl)
{
	unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;

	spin_lock_irqsave(&tbl->large_pool.lock, flags);
	for (i = 0; i < tbl->nr_pools; i++)
		spin_lock(&tbl->pools[i].lock);

	memset(tbl->it_map, 0, sz);

	iommu_table_reserve_pages(tbl, tbl->it_reserved_start,
			tbl->it_reserved_end);

	for (i = 0; i < tbl->nr_pools; i++)
		spin_unlock(&tbl->pools[i].lock);
	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
}
EXPORT_SYMBOL_GPL(iommu_release_ownership);

int iommu_add_device(struct iommu_table_group *table_group, struct device *dev)
{
	/*
	 * The sysfs entries should be populated before
	 * binding the IOMMU group. If the sysfs entries are not
	 * ready, we simply bail.
	 */
	if (!device_is_registered(dev))
		return -ENOENT;

	if (device_iommu_mapped(dev)) {
		pr_debug("%s: Skipping device %s with iommu group %d\n",
			 __func__, dev_name(dev),
			 iommu_group_id(dev->iommu_group));
		return -EBUSY;
	}

	pr_debug("%s: Adding %s to iommu group %d\n",
		 __func__, dev_name(dev), iommu_group_id(table_group->group));

	return iommu_group_add_device(table_group->group, dev);
}
EXPORT_SYMBOL_GPL(iommu_add_device);

void iommu_del_device(struct device *dev)
{
	/*
	 * Some devices might not have an IOMMU table or group,
	 * and we needn't detach them from the associated
	 * IOMMU groups
	 */
	if (!device_iommu_mapped(dev)) {
		pr_debug("iommu_tce: skipping device %s with no tbl\n",
			 dev_name(dev));
		return;
	}

	iommu_group_remove_device(dev);
}
EXPORT_SYMBOL_GPL(iommu_del_device);
#endif /* CONFIG_IOMMU_API */