/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup, new allocation schemes, virtual merging:
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/iommu-helper.h>
#include <linux/crash_dump.h>
#include <linux/hash.h>
#include <linux/fault-inject.h>
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>
#include <asm/fadump.h>
#include <asm/vio.h>
#include <asm/tce.h>

#define DBG(...)

static int novmerge;

static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);

static int __init setup_iommu(char *str)
{
        if (!strcmp(str, "novmerge"))
                novmerge = 1;
        else if (!strcmp(str, "vmerge"))
                novmerge = 0;
        return 1;
}

__setup("iommu=", setup_iommu);

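/*
 * Usage sketch (illustrative, not part of this file): virtual merging is
 * toggled from the kernel command line, e.g. booting with:
 *
 *	iommu=novmerge		// disable merging of scatterlist entries
 *	iommu=vmerge		// re-enable it
 */
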
static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);

/*
 * We precalculate the hash to avoid doing it on every allocation.
 *
 * The hash is important to spread CPUs across all the pools. For example,
 * on a POWER7 with 4 way SMT we want interrupts on the primary threads;
 * those are CPU ids 0, 4, 8, ... so with 4 pools, indexing by raw CPU id
 * would map all primary threads to the same pool.
 */
static int __init setup_iommu_pool_hash(void)
{
        unsigned int i;

        for_each_possible_cpu(i)
                per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);

        return 0;
}
subsys_initcall(setup_iommu_pool_hash);

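/*
 * Illustration (assumed example, not part of the build): with 4 pools and
 * SMT-4, the primary threads are CPU ids 0, 4, 8, ... so "cpu & 3" would
 * put them all in pool 0. Hashing the id first scatters them:
 *
 *	unsigned int p0 = hash_32(0, IOMMU_POOL_HASHBITS) & (IOMMU_NR_POOLS - 1);
 *	unsigned int p4 = hash_32(4, IOMMU_POOL_HASHBITS) & (IOMMU_NR_POOLS - 1);
 *	// p0 and p4 land in different pools with high probability
 */
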
#ifdef CONFIG_FAIL_IOMMU

static DECLARE_FAULT_ATTR(fail_iommu);

static int __init setup_fail_iommu(char *str)
{
        return setup_fault_attr(&fail_iommu, str);
}
__setup("fail_iommu=", setup_fail_iommu);

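/*
 * Usage sketch (assumes the generic fault-injection boot syntax parsed by
 * setup_fault_attr(), which is not documented in this file):
 *
 *	fail_iommu=<interval>,<probability>,<space>,<times>
 *
 * e.g. "fail_iommu=1,100,0,-1" makes every eligible mapping attempt fail.
 */
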
static bool should_fail_iommu(struct device *dev)
{
        return dev->archdata.fail_iommu && should_fail(&fail_iommu, 1);
}

static int __init fail_iommu_debugfs(void)
{
        struct dentry *dir = fault_create_debugfs_attr("fail_iommu",
                                                       NULL, &fail_iommu);

        return PTR_ERR_OR_ZERO(dir);
}
late_initcall(fail_iommu_debugfs);

static ssize_t fail_iommu_show(struct device *dev,
                               struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", dev->archdata.fail_iommu);
}

static ssize_t fail_iommu_store(struct device *dev,
                                struct device_attribute *attr, const char *buf,
                                size_t count)
{
        int i;

        if (count > 0 && sscanf(buf, "%d", &i) > 0)
                dev->archdata.fail_iommu = (i == 0) ? 0 : 1;

        return count;
}

static DEVICE_ATTR(fail_iommu, S_IRUGO|S_IWUSR, fail_iommu_show,
                   fail_iommu_store);

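/*
 * Example (hypothetical device path, for illustration only): the bus
 * notifier below creates a per-device "fail_iommu" attribute, so a single
 * PCI device is opted into fault injection with:
 *
 *	echo 1 > /sys/bus/pci/devices/0000:00:01.0/fail_iommu
 */
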
static int fail_iommu_bus_notify(struct notifier_block *nb,
                                 unsigned long action, void *data)
{
        struct device *dev = data;

        if (action == BUS_NOTIFY_ADD_DEVICE) {
                if (device_create_file(dev, &dev_attr_fail_iommu))
                        pr_warn("Unable to create IOMMU fault injection sysfs "
                                "entries\n");
        } else if (action == BUS_NOTIFY_DEL_DEVICE) {
                device_remove_file(dev, &dev_attr_fail_iommu);
        }

        return 0;
}

static struct notifier_block fail_iommu_bus_notifier = {
        .notifier_call = fail_iommu_bus_notify
};

static int __init fail_iommu_setup(void)
{
#ifdef CONFIG_PCI
        bus_register_notifier(&pci_bus_type, &fail_iommu_bus_notifier);
#endif
#ifdef CONFIG_IBMVIO
        bus_register_notifier(&vio_bus_type, &fail_iommu_bus_notifier);
#endif

        return 0;
}
/*
 * Must execute after the PCI and VIO subsystems have initialised but
 * before devices are probed.
 */
arch_initcall(fail_iommu_setup);
#else
static inline bool should_fail_iommu(struct device *dev)
{
        return false;
}
#endif

static unsigned long iommu_range_alloc(struct device *dev,
                                       struct iommu_table *tbl,
                                       unsigned long npages,
                                       unsigned long *handle,
                                       unsigned long mask,
                                       unsigned int align_order)
{
        unsigned long n, end, start;
        unsigned long limit;
        int largealloc = npages > 15;
        int pass = 0;
        unsigned long align_mask;
        unsigned long boundary_size;
        unsigned long flags;
        unsigned int pool_nr;
        struct iommu_pool *pool;

        align_mask = 0xffffffffffffffffl >> (64 - align_order);

        /* This allocator was derived from x86_64's bit string search */

        /* Sanity check */
        if (unlikely(npages == 0)) {
                if (printk_ratelimit())
                        WARN_ON(1);
                return DMA_ERROR_CODE;
        }

        if (should_fail_iommu(dev))
                return DMA_ERROR_CODE;

        /*
         * We don't need to disable preemption here because any CPU can
         * safely use any IOMMU pool.
         */
        pool_nr = __this_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1);

        if (largealloc)
                pool = &(tbl->large_pool);
        else
                pool = &(tbl->pools[pool_nr]);

        spin_lock_irqsave(&(pool->lock), flags);

again:
        if ((pass == 0) && handle && *handle &&
            (*handle >= pool->start) && (*handle < pool->end))
                start = *handle;
        else
                start = pool->hint;

        limit = pool->end;

        /* The case below can happen if we have a small segment appended
         * to a large, or when the previous alloc was at the very end of
         * the available space. If so, go back to the initial start.
         */
        if (start >= limit)
                start = pool->start;

        if (limit + tbl->it_offset > mask) {
                limit = mask - tbl->it_offset + 1;
                /* If we're constrained on address range, first try
                 * at the masked hint to avoid O(n) search complexity,
                 * but on second pass, start at 0 in pool 0.
                 */
                if ((start & mask) >= limit || pass > 0) {
                        spin_unlock(&(pool->lock));
                        pool = &(tbl->pools[0]);
                        spin_lock(&(pool->lock));
                        start = pool->start;
                } else {
                        start &= mask;
                }
        }

        if (dev)
                boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
                                      1 << tbl->it_page_shift);
        else
                boundary_size = ALIGN(1UL << 32, 1 << tbl->it_page_shift);
        /* 4GB boundary for iseries_hv_alloc and iseries_hv_map */

        n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,
                             boundary_size >> tbl->it_page_shift, align_mask);
        if (n == -1) {
                if (likely(pass == 0)) {
                        /* First try the pool from the start */
                        pool->hint = pool->start;
                        pass++;
                        goto again;

                } else if (pass <= tbl->nr_pools) {
                        /* Now try scanning all the other pools */
                        spin_unlock(&(pool->lock));
                        pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1);
                        pool = &tbl->pools[pool_nr];
                        spin_lock(&(pool->lock));
                        pool->hint = pool->start;
                        pass++;
                        goto again;

                } else {
                        /* Give up */
                        spin_unlock_irqrestore(&(pool->lock), flags);
                        return DMA_ERROR_CODE;
                }
        }

        end = n + npages;

        /* Bump the hint to a new block for small allocs. */
        if (largealloc) {
                /* Don't bump to new block to avoid fragmentation */
                pool->hint = end;
        } else {
                /* Overflow will be taken care of at the next allocation */
                pool->hint = (end + tbl->it_blocksize - 1) &
                             ~(tbl->it_blocksize - 1);
        }

        /* Update handle for SG allocations */
        if (handle)
                *handle = end;

        spin_unlock_irqrestore(&(pool->lock), flags);

        return n;
}

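/*
 * Call sketch (assumed caller, not from this file): allocate two IOMMU
 * pages for a device limited to 32-bit DMA addresses; as the callers below
 * do, the DMA mask is pre-shifted into units of IOMMU pages:
 *
 *	unsigned long entry = iommu_range_alloc(dev, tbl, 2, NULL,
 *				0xffffffffUL >> tbl->it_page_shift, 0);
 *	if (entry == DMA_ERROR_CODE)
 *		return DMA_ERROR_CODE;	// table full or constraint unmet
 */
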
static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
                              void *page, unsigned int npages,
                              enum dma_data_direction direction,
                              unsigned long mask, unsigned int align_order,
                              unsigned long attrs)
{
        unsigned long entry;
        dma_addr_t ret = DMA_ERROR_CODE;
        int build_fail;

        entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);

        if (unlikely(entry == DMA_ERROR_CODE))
                return DMA_ERROR_CODE;

        entry += tbl->it_offset;        /* Offset into real TCE table */
        ret = entry << tbl->it_page_shift;      /* Set the return dma address */

        /* Put the TCEs in the HW table */
        build_fail = tbl->it_ops->set(tbl, entry, npages,
                                      (unsigned long)page &
                                      IOMMU_PAGE_MASK(tbl), direction, attrs);

        /* tbl->it_ops->set() only returns non-zero for transient errors.
         * Clean up the table bitmap in this case and return
         * DMA_ERROR_CODE. For all other errors the functionality is
         * not altered.
         */
        if (unlikely(build_fail)) {
                __iommu_free(tbl, ret, npages);
                return DMA_ERROR_CODE;
        }

        /* Flush/invalidate TLB caches if necessary */
        if (tbl->it_ops->flush)
                tbl->it_ops->flush(tbl);

        /* Make sure updates are seen by hardware */
        mb();

        return ret;
}

static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
                             unsigned int npages)
{
        unsigned long entry, free_entry;

        entry = dma_addr >> tbl->it_page_shift;
        free_entry = entry - tbl->it_offset;

        if (((free_entry + npages) > tbl->it_size) ||
            (entry < tbl->it_offset)) {
                if (printk_ratelimit()) {
                        printk(KERN_INFO "iommu_free: invalid entry\n");
                        printk(KERN_INFO "\tentry    = 0x%lx\n", entry);
                        printk(KERN_INFO "\tdma_addr = 0x%llx\n", (u64)dma_addr);
                        printk(KERN_INFO "\tTable    = 0x%llx\n", (u64)tbl);
                        printk(KERN_INFO "\tbus#     = 0x%llx\n", (u64)tbl->it_busno);
                        printk(KERN_INFO "\tsize     = 0x%llx\n", (u64)tbl->it_size);
                        printk(KERN_INFO "\tstartOff = 0x%llx\n", (u64)tbl->it_offset);
                        printk(KERN_INFO "\tindex    = 0x%llx\n", (u64)tbl->it_index);
                        WARN_ON(1);
                }

                return false;
        }

        return true;
}

static struct iommu_pool *get_pool(struct iommu_table *tbl,
                                   unsigned long entry)
{
        struct iommu_pool *p;
        unsigned long largepool_start = tbl->large_pool.start;

        /* The large pool is the last pool at the top of the table */
        if (entry >= largepool_start) {
                p = &tbl->large_pool;
        } else {
                unsigned int pool_nr = entry / tbl->poolsize;

                BUG_ON(pool_nr > tbl->nr_pools);
                p = &tbl->pools[pool_nr];
        }

        return p;
}

static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
                         unsigned int npages)
{
        unsigned long entry, free_entry;
        unsigned long flags;
        struct iommu_pool *pool;

        entry = dma_addr >> tbl->it_page_shift;
        free_entry = entry - tbl->it_offset;

        pool = get_pool(tbl, free_entry);

        if (!iommu_free_check(tbl, dma_addr, npages))
                return;

        tbl->it_ops->clear(tbl, entry, npages);

        spin_lock_irqsave(&(pool->lock), flags);
        bitmap_clear(tbl->it_map, free_entry, npages);
        spin_unlock_irqrestore(&(pool->lock), flags);
}

static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
                       unsigned int npages)
{
        __iommu_free(tbl, dma_addr, npages);

        /* Make sure TLB cache is flushed if the HW needs it. We do
         * not do an mb() here on purpose, it is not needed on any of
         * the current platforms.
         */
        if (tbl->it_ops->flush)
                tbl->it_ops->flush(tbl);
}

int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
                     struct scatterlist *sglist, int nelems,
                     unsigned long mask, enum dma_data_direction direction,
                     unsigned long attrs)
{
        dma_addr_t dma_next = 0, dma_addr;
        struct scatterlist *s, *outs, *segstart;
        int outcount, incount, i, build_fail = 0;
        unsigned int align;
        unsigned long handle;
        unsigned int max_seg_size;

        BUG_ON(direction == DMA_NONE);

        if ((nelems == 0) || !tbl)
                return 0;

        outs = s = segstart = &sglist[0];
        outcount = 1;
        incount = nelems;
        handle = 0;

        /* Init first segment length for backout at failure */
        outs->dma_length = 0;

        DBG("sg mapping %d elements:\n", nelems);

        max_seg_size = dma_get_max_seg_size(dev);
        for_each_sg(sglist, s, nelems, i) {
                unsigned long vaddr, npages, entry, slen;

                slen = s->length;
                /* Sanity check */
                if (slen == 0) {
                        dma_next = 0;
                        continue;
                }
                /* Allocate iommu entries for that segment */
                vaddr = (unsigned long) sg_virt(s);
                npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl));
                align = 0;
                if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE &&
                    (vaddr & ~PAGE_MASK) == 0)
                        align = PAGE_SHIFT - tbl->it_page_shift;
                entry = iommu_range_alloc(dev, tbl, npages, &handle,
                                          mask >> tbl->it_page_shift, align);

                DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

                /* Handle failure */
                if (unlikely(entry == DMA_ERROR_CODE)) {
                        if (!(attrs & DMA_ATTR_NO_WARN) &&
                            printk_ratelimit())
                                dev_info(dev, "iommu_alloc failed, tbl %p "
                                         "vaddr %lx npages %lu\n", tbl, vaddr,
                                         npages);
                        goto failure;
                }

                /* Convert entry to a dma_addr_t */
                entry += tbl->it_offset;
                dma_addr = entry << tbl->it_page_shift;
                dma_addr |= (s->offset & ~IOMMU_PAGE_MASK(tbl));

                DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
                            npages, entry, dma_addr);

                /* Insert into HW table */
                build_fail = tbl->it_ops->set(tbl, entry, npages,
                                              vaddr & IOMMU_PAGE_MASK(tbl),
                                              direction, attrs);
                if (unlikely(build_fail))
                        goto failure;

                /* If we are in an open segment, try merging */
                if (segstart != s) {
                        DBG("  - trying merge...\n");
                        /* We cannot merge if:
                         * - allocated dma_addr isn't contiguous to previous allocation
                         */
                        if (novmerge || (dma_addr != dma_next) ||
                            (outs->dma_length + s->length > max_seg_size)) {
                                /* Can't merge: create a new segment */
                                segstart = s;
                                outcount++;
                                outs = sg_next(outs);
                                DBG("    can't merge, new segment.\n");
                        } else {
                                outs->dma_length += s->length;
                                DBG("    merged, new len: %ux\n", outs->dma_length);
                        }
                }

                if (segstart == s) {
                        /* This is a new segment, fill entries */
                        DBG("  - filling new segment.\n");
                        outs->dma_address = dma_addr;
                        outs->dma_length = slen;
                }

                /* Calculate next page pointer for contiguous check */
                dma_next = dma_addr + slen;

                DBG("  - dma next is: %lx\n", dma_next);
        }

        /* Flush/invalidate TLB caches if necessary */
        if (tbl->it_ops->flush)
                tbl->it_ops->flush(tbl);

        DBG("mapped %d elements:\n", outcount);

        /* For the sake of ppc_iommu_unmap_sg, we clear out the length in the
         * next entry of the sglist if we didn't fill the list completely
         */
        if (outcount < incount) {
                outs = sg_next(outs);
                outs->dma_address = DMA_ERROR_CODE;
                outs->dma_length = 0;
        }

        /* Make sure updates are seen by hardware */
        mb();

        return outcount;

 failure:
        for_each_sg(sglist, s, nelems, i) {
                if (s->dma_length != 0) {
                        unsigned long vaddr, npages;

                        vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl);
                        npages = iommu_num_pages(s->dma_address, s->dma_length,
                                                 IOMMU_PAGE_SIZE(tbl));
                        __iommu_free(tbl, vaddr, npages);
                        s->dma_address = DMA_ERROR_CODE;
                        s->dma_length = 0;
                }
                if (s == outs)
                        break;
        }
        return 0;
}

void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
                        int nelems, enum dma_data_direction direction,
                        unsigned long attrs)
{
        struct scatterlist *sg;

        BUG_ON(direction == DMA_NONE);

        if (!tbl)
                return;

        sg = sglist;
        while (nelems--) {
                unsigned int npages;
                dma_addr_t dma_handle = sg->dma_address;

                if (sg->dma_length == 0)
                        break;
                npages = iommu_num_pages(dma_handle, sg->dma_length,
                                         IOMMU_PAGE_SIZE(tbl));
                __iommu_free(tbl, dma_handle, npages);
                sg = sg_next(sg);
        }

        /* Flush/invalidate TLBs if necessary. As for iommu_free(), we
         * do not do an mb() here, the affected platforms do not need it
         * when freeing.
         */
        if (tbl->it_ops->flush)
                tbl->it_ops->flush(tbl);
}

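/*
 * Caller sketch (assumed driver code, not from this file): on powerpc the
 * two routines above back dma_map_sg()/dma_unmap_sg(), so drivers reach
 * them through the generic DMA API:
 *
 *	struct scatterlist sgl[2];
 *	int mapped;
 *
 *	sg_init_table(sgl, 2);
 *	sg_set_buf(&sgl[0], buf0, len0);
 *	sg_set_buf(&sgl[1], buf1, len1);
 *	mapped = dma_map_sg(&pdev->dev, sgl, 2, DMA_TO_DEVICE);
 *	if (!mapped)
 *		return -ENOMEM;
 *	...
 *	dma_unmap_sg(&pdev->dev, sgl, 2, DMA_TO_DEVICE);
 *
 * With virtual merging enabled, "mapped" can come back smaller than the
 * two entries passed in.
 */
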
static void iommu_table_clear(struct iommu_table *tbl)
{
        /*
         * With firmware-assisted dump, the system goes through a clean
         * reboot process at the time of a crash. Hence it's safe to
         * clear the TCE entries if firmware-assisted dump is active.
         */
        if (!is_kdump_kernel() || is_fadump_active()) {
                /* Clear the table in case firmware left allocations in it */
                tbl->it_ops->clear(tbl, tbl->it_offset, tbl->it_size);
                return;
        }

#ifdef CONFIG_CRASH_DUMP
        if (tbl->it_ops->get) {
                unsigned long index, tceval, tcecount = 0;

                /* Reserve the existing mappings left by the first kernel. */
                for (index = 0; index < tbl->it_size; index++) {
                        tceval = tbl->it_ops->get(tbl, index + tbl->it_offset);
                        /*
                         * Freed TCE entry contains 0x7fffffffffffffff on JS20
                         */
                        if (tceval && (tceval != 0x7fffffffffffffffUL)) {
                                __set_bit(index, tbl->it_map);
                                tcecount++;
                        }
                }

                if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
                        printk(KERN_WARNING "TCE table is full; freeing ");
                        printk(KERN_WARNING "%d entries for the kdump boot\n",
                                KDUMP_MIN_TCE_ENTRIES);
                        for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
                                index < tbl->it_size; index++)
                                __clear_bit(index, tbl->it_map);
                }
        }
#endif
}

/*
 * Build an iommu_table structure. This contains a bit map which
 * is used to manage allocation of the tce space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{
        unsigned long sz;
        static int welcomed = 0;
        struct page *page;
        unsigned int i;
        struct iommu_pool *p;

        BUG_ON(!tbl->it_ops);

        /* number of bytes needed for the bitmap */
        sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);

        page = alloc_pages_node(nid, GFP_KERNEL, get_order(sz));
        if (!page)
                panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
        tbl->it_map = page_address(page);
        memset(tbl->it_map, 0, sz);

        /*
         * Reserve page 0 so it will not be used for any mappings.
         * This avoids buggy drivers that consider page 0 to be invalid
         * to crash the machine or even lose data.
         */
        if (tbl->it_offset == 0)
                set_bit(0, tbl->it_map);

        /* We only split the IOMMU table if we have 1GB or more of space */
        if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024))
                tbl->nr_pools = IOMMU_NR_POOLS;
        else
                tbl->nr_pools = 1;

        /* We reserve the top 1/4 of the table for large allocations */
        tbl->poolsize = (tbl->it_size * 3 / 4) / tbl->nr_pools;

        for (i = 0; i < tbl->nr_pools; i++) {
                p = &tbl->pools[i];
                spin_lock_init(&(p->lock));
                p->start = tbl->poolsize * i;
                p->hint = p->start;
                p->end = p->start + tbl->poolsize;
        }

        p = &tbl->large_pool;
        spin_lock_init(&(p->lock));
        p->start = tbl->poolsize * i;
        p->hint = p->start;
        p->end = tbl->it_size;

        iommu_table_clear(tbl);

        if (!welcomed) {
                printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
                       novmerge ? "disabled" : "enabled");
                welcomed = 1;
        }

        return tbl;
}

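/*
 * Setup sketch (assumed platform code; names and values are illustrative):
 * the caller fills in the table geometry and TCE ops before handing the
 * table to iommu_init_table():
 *
 *	struct iommu_table *tbl = kzalloc_node(sizeof(*tbl), GFP_KERNEL, nid);
 *
 *	tbl->it_busno = busno;
 *	tbl->it_base = (unsigned long)tce_table_base;
 *	tbl->it_offset = 0;
 *	tbl->it_size = window_size >> shift;	// in entries, not bytes
 *	tbl->it_page_shift = shift;		// e.g. 12 for 4K IOMMU pages
 *	tbl->it_blocksize = 16;
 *	tbl->it_ops = &my_tce_ops;		// must provide set()/clear()
 *	iommu_init_table(tbl, nid);
 */
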
void iommu_free_table(struct iommu_table *tbl, const char *node_name)
{
        unsigned long bitmap_sz;
        unsigned int order;

        if (!tbl)
                return;

        if (tbl->it_ops->free)
                tbl->it_ops->free(tbl);

        if (!tbl->it_map) {
                kfree(tbl);
                return;
        }

        /*
         * In case we have reserved the first bit, we should not emit
         * the warning below.
         */
        if (tbl->it_offset == 0)
                clear_bit(0, tbl->it_map);

        /* verify that table contains no entries */
        if (!bitmap_empty(tbl->it_map, tbl->it_size))
                pr_warn("%s: Unexpected TCEs for %s\n", __func__, node_name);

        /* calculate bitmap size in bytes */
        bitmap_sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);

        /* free bitmap */
        order = get_order(bitmap_sz);
        free_pages((unsigned long) tbl->it_map, order);

        /* free table */
        kfree(tbl);
}
EXPORT_SYMBOL_GPL(iommu_free_table);

/* Creates TCEs for a user provided buffer. The user buffer must be
 * contiguous real kernel storage (not vmalloc). The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
                          struct page *page, unsigned long offset, size_t size,
                          unsigned long mask, enum dma_data_direction direction,
                          unsigned long attrs)
{
        dma_addr_t dma_handle = DMA_ERROR_CODE;
        void *vaddr;
        unsigned long uaddr;
        unsigned int npages, align;

        BUG_ON(direction == DMA_NONE);

        vaddr = page_address(page) + offset;
        uaddr = (unsigned long)vaddr;
        npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl));

        if (tbl) {
                align = 0;
                if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE &&
                    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
                        align = PAGE_SHIFT - tbl->it_page_shift;

                dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
                                         mask >> tbl->it_page_shift, align,
                                         attrs);
                if (dma_handle == DMA_ERROR_CODE) {
                        if (!(attrs & DMA_ATTR_NO_WARN) &&
                            printk_ratelimit())  {
                                dev_info(dev, "iommu_alloc failed, tbl %p "
                                         "vaddr %p npages %d\n", tbl, vaddr,
                                         npages);
                        }
                } else
                        dma_handle |= (uaddr & ~IOMMU_PAGE_MASK(tbl));
        }

        return dma_handle;
}

void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
                      size_t size, enum dma_data_direction direction,
                      unsigned long attrs)
{
        unsigned int npages;

        BUG_ON(direction == DMA_NONE);

        if (tbl) {
                npages = iommu_num_pages(dma_handle, size,
                                         IOMMU_PAGE_SIZE(tbl));
                iommu_free(tbl, dma_handle, npages);
        }
}

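/*
 * Caller sketch (assumed driver code, not from this file): these two
 * routines sit underneath dma_map_page()/dma_map_single() on powerpc, so
 * the usual streaming-DMA pattern ends up here:
 *
 *	dma_addr_t dma = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(&pdev->dev, dma))
 *		return -ENOMEM;
 *	...			// device performs the transfer
 *	dma_unmap_single(&pdev->dev, dma, len, DMA_TO_DEVICE);
 */
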
/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
                           size_t size, dma_addr_t *dma_handle,
                           unsigned long mask, gfp_t flag, int node)
{
        void *ret = NULL;
        dma_addr_t mapping;
        unsigned int order;
        unsigned int nio_pages, io_order;
        struct page *page;

        size = PAGE_ALIGN(size);
        order = get_order(size);

        /*
         * Client asked for way too much space. This is checked later
         * anyway. It is easier to debug here for the drivers than in
         * the tce tables.
         */
        if (order >= IOMAP_MAX_ORDER) {
                dev_info(dev, "iommu_alloc_consistent size too large: 0x%lx\n",
                         size);
                return NULL;
        }

        if (!tbl)
                return NULL;

        /* Alloc enough pages (and possibly more) */
        page = alloc_pages_node(node, flag, order);
        if (!page)
                return NULL;
        ret = page_address(page);
        memset(ret, 0, size);

        /* Set up tces to cover the allocated range */
        nio_pages = size >> tbl->it_page_shift;
        io_order = get_iommu_order(size, tbl);
        mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
                              mask >> tbl->it_page_shift, io_order, 0);
        if (mapping == DMA_ERROR_CODE) {
                free_pages((unsigned long)ret, order);
                return NULL;
        }
        *dma_handle = mapping;
        return ret;
}

void iommu_free_coherent(struct iommu_table *tbl, size_t size,
                         void *vaddr, dma_addr_t dma_handle)
{
        if (tbl) {
                unsigned int nio_pages;

                size = PAGE_ALIGN(size);
                nio_pages = size >> tbl->it_page_shift;
                iommu_free(tbl, dma_handle, nio_pages);
                free_pages((unsigned long)vaddr, get_order(size));
        }
}

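/*
 * Caller sketch (assumed driver code, not from this file): for devices
 * behind an IOMMU these two routines back dma_alloc_coherent() and
 * dma_free_coherent():
 *
 *	dma_addr_t dma;
 *	void *ring = dma_alloc_coherent(&pdev->dev, 4096, &dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...			// "ring" is DMA-visible at address "dma"
 *	dma_free_coherent(&pdev->dev, 4096, ring, dma);
 */
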
unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir)
{
        switch (dir) {
        case DMA_BIDIRECTIONAL:
                return TCE_PCI_READ | TCE_PCI_WRITE;
        case DMA_FROM_DEVICE:
                return TCE_PCI_WRITE;
        case DMA_TO_DEVICE:
                return TCE_PCI_READ;
        default:
                return 0;
        }
}
EXPORT_SYMBOL_GPL(iommu_direction_to_tce_perm);

#ifdef CONFIG_IOMMU_API
/*
 * SPAPR TCE API
 */
static void group_release(void *iommu_data)
{
        struct iommu_table_group *table_group = iommu_data;

        table_group->group = NULL;
}

void iommu_register_group(struct iommu_table_group *table_group,
                int pci_domain_number, unsigned long pe_num)
{
        struct iommu_group *grp;
        char *name;

        grp = iommu_group_alloc();
        if (IS_ERR(grp)) {
                pr_warn("powerpc iommu api: cannot create new group, err=%ld\n",
                                PTR_ERR(grp));
                return;
        }
        table_group->group = grp;
        iommu_group_set_iommudata(grp, table_group, group_release);
        name = kasprintf(GFP_KERNEL, "domain%d-pe%lx",
                        pci_domain_number, pe_num);
        if (!name)
                return;
        iommu_group_set_name(grp, name);
        kfree(name);
}

enum dma_data_direction iommu_tce_direction(unsigned long tce)
{
        if ((tce & TCE_PCI_READ) && (tce & TCE_PCI_WRITE))
                return DMA_BIDIRECTIONAL;
        else if (tce & TCE_PCI_READ)
                return DMA_TO_DEVICE;
        else if (tce & TCE_PCI_WRITE)
                return DMA_FROM_DEVICE;
        else
                return DMA_NONE;
}
EXPORT_SYMBOL_GPL(iommu_tce_direction);

void iommu_flush_tce(struct iommu_table *tbl)
{
        /* Flush/invalidate TLB caches if necessary */
        if (tbl->it_ops->flush)
                tbl->it_ops->flush(tbl);

        /* Make sure updates are seen by hardware */
        mb();
}
EXPORT_SYMBOL_GPL(iommu_flush_tce);

int iommu_tce_clear_param_check(struct iommu_table *tbl,
                unsigned long ioba, unsigned long tce_value,
                unsigned long npages)
{
        /* tbl->it_ops->clear() does not support any value but 0 */
        if (tce_value)
                return -EINVAL;

        if (ioba & ~IOMMU_PAGE_MASK(tbl))
                return -EINVAL;

        ioba >>= tbl->it_page_shift;
        if (ioba < tbl->it_offset)
                return -EINVAL;

        if ((ioba + npages) > (tbl->it_offset + tbl->it_size))
                return -EINVAL;

        return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_clear_param_check);

int iommu_tce_put_param_check(struct iommu_table *tbl,
                unsigned long ioba, unsigned long tce)
{
        if (tce & ~IOMMU_PAGE_MASK(tbl))
                return -EINVAL;

        if (ioba & ~IOMMU_PAGE_MASK(tbl))
                return -EINVAL;

        ioba >>= tbl->it_page_shift;
        if (ioba < tbl->it_offset)
                return -EINVAL;

        if ((ioba + 1) > (tbl->it_offset + tbl->it_size))
                return -EINVAL;

        return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_put_param_check);

long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry,
                unsigned long *hpa, enum dma_data_direction *direction)
{
        long ret;

        ret = tbl->it_ops->exchange(tbl, entry, hpa, direction);

        if (!ret && ((*direction == DMA_FROM_DEVICE) ||
                        (*direction == DMA_BIDIRECTIONAL)))
                SetPageDirty(pfn_to_page(*hpa >> PAGE_SHIFT));

        /* if (unlikely(ret))
                pr_err("iommu_tce: %s failed on hwaddr=%lx ioba=%lx kva=%lx ret=%d\n",
                        __func__, hwaddr, entry << tbl->it_page_shift,
                        hwaddr, ret); */

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_tce_xchg);

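/*
 * Usage sketch (assumed VFIO-style caller, not from this file): validate a
 * guest-supplied ioba/tce pair, then exchange the entry so the previous
 * page can be released:
 *
 *	unsigned long hpa = new_hpa;
 *	enum dma_data_direction dir = iommu_tce_direction(tce);
 *	long ret;
 *
 *	if (iommu_tce_put_param_check(tbl, ioba, tce))
 *		return -EINVAL;
 *	ret = iommu_tce_xchg(tbl, ioba >> tbl->it_page_shift, &hpa, &dir);
 *	// on success, hpa/dir now describe the old mapping
 */
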
#ifdef CONFIG_PPC_BOOK3S_64
long iommu_tce_xchg_rm(struct iommu_table *tbl, unsigned long entry,
                unsigned long *hpa, enum dma_data_direction *direction)
{
        long ret;

        ret = tbl->it_ops->exchange_rm(tbl, entry, hpa, direction);

        if (!ret && ((*direction == DMA_FROM_DEVICE) ||
                        (*direction == DMA_BIDIRECTIONAL))) {
                struct page *pg = realmode_pfn_to_page(*hpa >> PAGE_SHIFT);

                if (likely(pg)) {
                        SetPageDirty(pg);
                } else {
                        tbl->it_ops->exchange_rm(tbl, entry, hpa, direction);
                        ret = -EFAULT;
                }
        }

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_tce_xchg_rm);
#endif

int iommu_take_ownership(struct iommu_table *tbl)
{
        unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
        int ret = 0;

        /*
         * VFIO does not control TCE entries allocation and the guest
         * can write new TCEs on top of existing ones so iommu_tce_build()
         * must be able to release old pages. This functionality
         * requires the exchange() callback to be defined; if it is not
         * implemented, we disallow taking ownership over the table.
         */
        if (!tbl->it_ops->exchange)
                return -EINVAL;

        spin_lock_irqsave(&tbl->large_pool.lock, flags);
        for (i = 0; i < tbl->nr_pools; i++)
                spin_lock(&tbl->pools[i].lock);

        if (tbl->it_offset == 0)
                clear_bit(0, tbl->it_map);

        if (!bitmap_empty(tbl->it_map, tbl->it_size)) {
                pr_err("iommu_tce: it_map is not empty");
                ret = -EBUSY;
                /* Restore bit#0 set by iommu_init_table() */
                if (tbl->it_offset == 0)
                        set_bit(0, tbl->it_map);
        } else {
                memset(tbl->it_map, 0xff, sz);
        }

        for (i = 0; i < tbl->nr_pools; i++)
                spin_unlock(&tbl->pools[i].lock);
        spin_unlock_irqrestore(&tbl->large_pool.lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_take_ownership);

void iommu_release_ownership(struct iommu_table *tbl)
{
        unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;

        spin_lock_irqsave(&tbl->large_pool.lock, flags);
        for (i = 0; i < tbl->nr_pools; i++)
                spin_lock(&tbl->pools[i].lock);

        memset(tbl->it_map, 0, sz);

        /* Restore bit#0 set by iommu_init_table() */
        if (tbl->it_offset == 0)
                set_bit(0, tbl->it_map);

        for (i = 0; i < tbl->nr_pools; i++)
                spin_unlock(&tbl->pools[i].lock);
        spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
}
EXPORT_SYMBOL_GPL(iommu_release_ownership);

int iommu_add_device(struct device *dev)
{
        struct iommu_table *tbl;
        struct iommu_table_group_link *tgl;

        /*
         * The sysfs entries should be populated before
         * binding the IOMMU group. If the sysfs entries
         * aren't ready yet, we simply bail.
         */
        if (!device_is_registered(dev))
                return -ENOENT;

        if (dev->iommu_group) {
                pr_debug("%s: Skipping device %s with iommu group %d\n",
                         __func__, dev_name(dev),
                         iommu_group_id(dev->iommu_group));
                return -EBUSY;
        }

        tbl = get_iommu_table_base(dev);
        if (!tbl) {
                pr_debug("%s: Skipping device %s with no tbl\n",
                         __func__, dev_name(dev));
                return 0;
        }

        tgl = list_first_entry_or_null(&tbl->it_group_list,
                                       struct iommu_table_group_link, next);
        if (!tgl) {
                pr_debug("%s: Skipping device %s with no group\n",
                         __func__, dev_name(dev));
                return 0;
        }
        pr_debug("%s: Adding %s to iommu group %d\n",
                 __func__, dev_name(dev),
                 iommu_group_id(tgl->table_group->group));

        if (PAGE_SIZE < IOMMU_PAGE_SIZE(tbl)) {
                pr_err("%s: Invalid IOMMU page size %lx (%lx) on %s\n",
                       __func__, IOMMU_PAGE_SIZE(tbl),
                       PAGE_SIZE, dev_name(dev));
                return -EINVAL;
        }

        return iommu_group_add_device(tgl->table_group->group, dev);
}
EXPORT_SYMBOL_GPL(iommu_add_device);

void iommu_del_device(struct device *dev)
{
        /*
         * Some devices might not have an IOMMU table and group
         * and we needn't detach them from the associated
         * IOMMU groups
         */
        if (!dev->iommu_group) {
                pr_debug("iommu_tce: skipping device %s with no tbl\n",
                         dev_name(dev));
                return;
        }

        iommu_group_remove_device(dev);
}
EXPORT_SYMBOL_GPL(iommu_del_device);

static int tce_iommu_bus_notifier(struct notifier_block *nb,
                unsigned long action, void *data)
{
        struct device *dev = data;

        switch (action) {
        case BUS_NOTIFY_ADD_DEVICE:
                return iommu_add_device(dev);
        case BUS_NOTIFY_DEL_DEVICE:
                if (dev->iommu_group)
                        iommu_del_device(dev);
                return 0;
        default:
                return 0;
        }
}

static struct notifier_block tce_iommu_bus_nb = {
        .notifier_call = tce_iommu_bus_notifier,
};

int __init tce_iommu_bus_notifier_init(void)
{
        bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
        return 0;
}
#endif /* CONFIG_IOMMU_API */