// SPDX-License-Identifier: GPL-2.0-only
/*
 * Dynamic DMA mapping support.
 *
 * This implementation is a fallback for platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
 * Copyright (C) 2000, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
 * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
 *			unnecessary i-cache flushing.
 * 04/07/.. ak		Better overflow handling. Assorted fixes.
 * 05/09/10 linville	Add support for syncing ranges, support syncing for
 *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
 * 08/12/11 beckyb	Add highmem support
 */

#define pr_fmt(fmt) "software IO TLB: " fmt

#include <linux/cache.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/swiotlb.h>
#include <linux/pfn.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/mem_encrypt.h>
#include <linux/set_memory.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#endif

#include <asm/io.h>
#include <asm/dma.h>

#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/iommu-helper.h>

#define CREATE_TRACE_POINTS
#include <trace/events/swiotlb.h>

#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))

/*
 * Minimum IO TLB size to bother booting with.  Systems with mainly
 * 64-bit capable cards will only lightly use the swiotlb.  If we can't
 * allocate a contiguous 1MB, we're probably in trouble anyway.
 */
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)

#define INVALID_PHYS_ADDR (~(phys_addr_t)0)

enum swiotlb_force swiotlb_force;

struct io_tlb_mem *io_tlb_default_mem;

/*
 * Maximum segment that we can provide which (if pages are contiguous) will
 * not be bounced (unless SWIOTLB_FORCE is set).
 */
static unsigned int max_segment;

static unsigned long default_nslabs = IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT;

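/*
 * Parse the "swiotlb=" early boot parameter: an optional slab count,
 * optionally followed by ",force" or ",noforce" to override the default
 * bounce-buffering policy.
 */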
static int __init
setup_io_tlb_npages(char *str)
{
        if (isdigit(*str)) {
                /* avoid tail segment of size < IO_TLB_SEGSIZE */
                default_nslabs =
                        ALIGN(simple_strtoul(str, &str, 0), IO_TLB_SEGSIZE);
        }
        if (*str == ',')
                ++str;
        if (!strcmp(str, "force"))
                swiotlb_force = SWIOTLB_FORCE;
        else if (!strcmp(str, "noforce"))
                swiotlb_force = SWIOTLB_NO_FORCE;

        return 0;
}
early_param("swiotlb", setup_io_tlb_npages);

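/*
 * Report the advertised maximum segment size for the default software IO
 * TLB, or 0 if it has not been initialized; swiotlb_set_max_segment()
 * below records that limit (clamped to 1 when bouncing is forced).
 */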
unsigned int swiotlb_max_segment(void)
{
        return io_tlb_default_mem ? max_segment : 0;
}
EXPORT_SYMBOL_GPL(swiotlb_max_segment);

void swiotlb_set_max_segment(unsigned int val)
{
        if (swiotlb_force == SWIOTLB_FORCE)
                max_segment = 1;
        else
                max_segment = rounddown(val, PAGE_SIZE);
}

unsigned long swiotlb_size_or_default(void)
{
        return default_nslabs << IO_TLB_SHIFT;
}

void __init swiotlb_adjust_size(unsigned long size)
{
        /*
         * If the swiotlb= parameter has not been specified, give a chance to
         * architectures such as those supporting memory encryption to
         * adjust/expand SWIOTLB size for their use.
         */
        if (default_nslabs != IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT)
                return;
        size = ALIGN(size, IO_TLB_SIZE);
        default_nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
        pr_info("SWIOTLB bounce buffer size adjusted to %luMB", size >> 20);
}

void swiotlb_print_info(void)
{
        struct io_tlb_mem *mem = io_tlb_default_mem;

        if (!mem) {
                pr_warn("No low mem\n");
                return;
        }

        pr_info("mapped [mem %pa-%pa] (%luMB)\n", &mem->start, &mem->end,
               (mem->nslabs << IO_TLB_SHIFT) >> 20);
}

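/*
 * io_tlb_offset() returns a slot's index within its IO_TLB_SEGSIZE segment;
 * nr_slots() converts a byte count into a number of IO TLB slots.
 */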
static inline unsigned long io_tlb_offset(unsigned long val)
{
        return val & (IO_TLB_SEGSIZE - 1);
}

static inline unsigned long nr_slots(u64 val)
{
        return DIV_ROUND_UP(val, IO_TLB_SIZE);
}

/*
 * Early SWIOTLB allocation may be too early to allow an architecture to
 * perform the desired operations.  This function allows the architecture to
 * call SWIOTLB when the operations are possible.  It needs to be called
 * before the SWIOTLB memory is used.
 */
void __init swiotlb_update_mem_attributes(void)
{
        struct io_tlb_mem *mem = io_tlb_default_mem;
        void *vaddr;
        unsigned long bytes;

        if (!mem || mem->late_alloc)
                return;
        vaddr = phys_to_virt(mem->start);
        bytes = PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT);
        set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
        memset(vaddr, 0, bytes);
}

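/*
 * Common initialization for an io_tlb_mem pool: record its bounds, build
 * the per-slot free lists and zero the bounce buffer memory.
 */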
static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
                                    unsigned long nslabs, bool late_alloc)
{
        void *vaddr = phys_to_virt(start);
        unsigned long bytes = nslabs << IO_TLB_SHIFT, i;

        mem->nslabs = nslabs;
        mem->start = start;
        mem->end = mem->start + bytes;
        mem->index = 0;
        mem->late_alloc = late_alloc;

        if (swiotlb_force == SWIOTLB_FORCE)
                mem->force_bounce = true;

        spin_lock_init(&mem->lock);
        for (i = 0; i < mem->nslabs; i++) {
                mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i);
                mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
                mem->slots[i].alloc_size = 0;
        }
        memset(vaddr, 0, bytes);
}

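/*
 * Set up the default io_tlb_mem around an early (memblock) allocation of
 * @nslabs slabs at @tlb.
 */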
int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
{
        struct io_tlb_mem *mem;
        size_t alloc_size;

        if (swiotlb_force == SWIOTLB_NO_FORCE)
                return 0;

        /* protect against double initialization */
        if (WARN_ON_ONCE(io_tlb_default_mem))
                return -ENOMEM;

        alloc_size = PAGE_ALIGN(struct_size(mem, slots, nslabs));
        mem = memblock_alloc(alloc_size, PAGE_SIZE);
        if (!mem)
                panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
                      __func__, alloc_size, PAGE_SIZE);

        swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, false);

        io_tlb_default_mem = mem;
        if (verbose)
                swiotlb_print_info();
        swiotlb_set_max_segment(mem->nslabs << IO_TLB_SHIFT);
        return 0;
}

/*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the DMA API.
 */
void __init
swiotlb_init(int verbose)
{
        size_t bytes = PAGE_ALIGN(default_nslabs << IO_TLB_SHIFT);
        void *tlb;

        if (swiotlb_force == SWIOTLB_NO_FORCE)
                return;

        /* Get IO TLB memory from the low pages */
        tlb = memblock_alloc_low(bytes, PAGE_SIZE);
        if (!tlb)
                goto fail;
        if (swiotlb_init_with_tbl(tlb, default_nslabs, verbose))
                goto fail_free_mem;
        return;

fail_free_mem:
        memblock_free_early(__pa(tlb), bytes);
fail:
        pr_warn("Cannot allocate buffer");
}

/*
 * Systems with larger DMA zones (those that don't support ISA) can
 * initialize the swiotlb later using the slab allocator if needed.
 * This should be just like above, but with some error catching.
 */
int
swiotlb_late_init_with_default_size(size_t default_size)
{
        unsigned long nslabs =
                ALIGN(default_size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
        unsigned long bytes;
        unsigned char *vstart = NULL;
        unsigned int order;
        int rc = 0;

        if (swiotlb_force == SWIOTLB_NO_FORCE)
                return 0;

        /*
         * Get IO TLB memory from the low pages
         */
        order = get_order(nslabs << IO_TLB_SHIFT);
        nslabs = SLABS_PER_PAGE << order;
        bytes = nslabs << IO_TLB_SHIFT;

        while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
                vstart = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
                                                  order);
                if (vstart)
                        break;
                order--;
        }

        if (!vstart)
                return -ENOMEM;

        if (order != get_order(bytes)) {
                pr_warn("only able to allocate %ld MB\n",
                        (PAGE_SIZE << order) >> 20);
                nslabs = SLABS_PER_PAGE << order;
        }
        rc = swiotlb_late_init_with_tbl(vstart, nslabs);
        if (rc)
                free_pages((unsigned long)vstart, order);

        return rc;
}

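/*
 * Late counterpart of swiotlb_init_with_tbl(): wrap an already allocated
 * buffer @tlb in a freshly allocated io_tlb_mem and make it the default.
 */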
int
swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
{
        struct io_tlb_mem *mem;
        unsigned long bytes = nslabs << IO_TLB_SHIFT;

        if (swiotlb_force == SWIOTLB_NO_FORCE)
                return 0;

        /* protect against double initialization */
        if (WARN_ON_ONCE(io_tlb_default_mem))
                return -ENOMEM;

        mem = (void *)__get_free_pages(GFP_KERNEL,
                get_order(struct_size(mem, slots, nslabs)));
        if (!mem)
                return -ENOMEM;

        memset(mem, 0, sizeof(*mem));
        set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT);
        swiotlb_init_io_tlb_mem(mem, virt_to_phys(tlb), nslabs, true);

        io_tlb_default_mem = mem;
        swiotlb_print_info();
        swiotlb_set_max_segment(mem->nslabs << IO_TLB_SHIFT);
        return 0;
}

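/* Free the bookkeeping structure of the default software IO TLB. */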
void __init swiotlb_exit(void)
{
        struct io_tlb_mem *mem = io_tlb_default_mem;
        size_t size;

        if (!mem)
                return;

        size = struct_size(mem, slots, mem->nslabs);
        if (mem->late_alloc)
                free_pages((unsigned long)mem, get_order(size));
        else
                memblock_free_late(__pa(mem), PAGE_ALIGN(size));
        io_tlb_default_mem = NULL;
}

/*
 * Return the offset into an IO TLB slot required to keep the device happy.
 */
static unsigned int swiotlb_align_offset(struct device *dev, u64 addr)
{
        return addr & dma_get_min_align_mask(dev) & (IO_TLB_SIZE - 1);
}

/*
 * Bounce: copy the swiotlb buffer from or back to the original dma location
 */
static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size,
                           enum dma_data_direction dir)
{
        struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
        int index = (tlb_addr - mem->start) >> IO_TLB_SHIFT;
        phys_addr_t orig_addr = mem->slots[index].orig_addr;
        size_t alloc_size = mem->slots[index].alloc_size;
        unsigned long pfn = PFN_DOWN(orig_addr);
        unsigned char *vaddr = phys_to_virt(tlb_addr);
        unsigned int tlb_offset;

        if (orig_addr == INVALID_PHYS_ADDR)
                return;

        tlb_offset = (tlb_addr & (IO_TLB_SIZE - 1)) -
                     swiotlb_align_offset(dev, orig_addr);

        orig_addr += tlb_offset;
        alloc_size -= tlb_offset;

        if (size > alloc_size) {
                dev_WARN_ONCE(dev, 1,
                        "Buffer overflow detected. Allocation size: %zu. Mapping size: %zu.\n",
                        alloc_size, size);
                size = alloc_size;
        }

        if (PageHighMem(pfn_to_page(pfn))) {
                /* The buffer does not have a mapping.  Map it in and copy */
                unsigned int offset = orig_addr & ~PAGE_MASK;
                char *buffer;
                unsigned int sz = 0;
                unsigned long flags;

                while (size) {
                        sz = min_t(size_t, PAGE_SIZE - offset, size);

                        local_irq_save(flags);
                        buffer = kmap_atomic(pfn_to_page(pfn));
                        if (dir == DMA_TO_DEVICE)
                                memcpy(vaddr, buffer + offset, sz);
                        else
                                memcpy(buffer + offset, vaddr, sz);
                        kunmap_atomic(buffer);
                        local_irq_restore(flags);

                        size -= sz;
                        pfn++;
                        vaddr += sz;
                        offset = 0;
                }
        } else if (dir == DMA_TO_DEVICE) {
                memcpy(vaddr, phys_to_virt(orig_addr), size);
        } else {
                memcpy(phys_to_virt(orig_addr), vaddr, size);
        }
}

#define slot_addr(start, idx)	((start) + ((idx) << IO_TLB_SHIFT))

/*
 * Carefully handle integer overflow which can occur when boundary_mask == ~0UL.
 */
static inline unsigned long get_max_slots(unsigned long boundary_mask)
{
        if (boundary_mask == ~0UL)
                return 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
        return nr_slots(boundary_mask + 1);
}

static unsigned int wrap_index(struct io_tlb_mem *mem, unsigned int index)
{
        if (index >= mem->nslabs)
                return 0;
        return index;
}

/*
 * Find a suitable number of contiguous IO TLB entries that will fit this
 * request and allocate a buffer from that IO TLB pool.
 */
static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
                              size_t alloc_size)
{
        struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
        unsigned long boundary_mask = dma_get_seg_boundary(dev);
        dma_addr_t tbl_dma_addr =
                phys_to_dma_unencrypted(dev, mem->start) & boundary_mask;
        unsigned long max_slots = get_max_slots(boundary_mask);
        unsigned int iotlb_align_mask =
                dma_get_min_align_mask(dev) & ~(IO_TLB_SIZE - 1);
        unsigned int nslots = nr_slots(alloc_size), stride;
        unsigned int index, wrap, count = 0, i;
        unsigned int offset = swiotlb_align_offset(dev, orig_addr);
        unsigned long flags;

        BUG_ON(!nslots);

        /*
         * For mappings with an alignment requirement don't bother looping to
         * unaligned slots once we have found an aligned one.  For allocations
         * of PAGE_SIZE or larger only look for page aligned allocations.
         */
        stride = (iotlb_align_mask >> IO_TLB_SHIFT) + 1;
        if (alloc_size >= PAGE_SIZE)
                stride = max(stride, stride << (PAGE_SHIFT - IO_TLB_SHIFT));

        spin_lock_irqsave(&mem->lock, flags);
        if (unlikely(nslots > mem->nslabs - mem->used))
                goto not_found;

        index = wrap = wrap_index(mem, ALIGN(mem->index, stride));
        do {
                if (orig_addr &&
                    (slot_addr(tbl_dma_addr, index) & iotlb_align_mask) !=
                            (orig_addr & iotlb_align_mask)) {
                        index = wrap_index(mem, index + 1);
                        continue;
                }

                /*
                 * If we find a slot that indicates we have 'nslots' number of
                 * contiguous buffers, we allocate the buffers from that slot
                 * and mark the entries as '0' indicating unavailable.
                 */
                if (!iommu_is_span_boundary(index, nslots,
                                            nr_slots(tbl_dma_addr),
                                            max_slots)) {
                        if (mem->slots[index].list >= nslots)
                                goto found;
                }
                index = wrap_index(mem, index + stride);
        } while (index != wrap);

not_found:
        spin_unlock_irqrestore(&mem->lock, flags);
        return -1;

found:
        for (i = index; i < index + nslots; i++) {
                mem->slots[i].list = 0;
                mem->slots[i].alloc_size =
                        alloc_size - (offset + ((i - index) << IO_TLB_SHIFT));
        }
        for (i = index - 1;
             io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 &&
             mem->slots[i].list; i--)
                mem->slots[i].list = ++count;

        /*
         * Update the indices to avoid searching in the next round.
         */
        if (index + nslots < mem->nslabs)
                mem->index = index + nslots;
        else
                mem->index = 0;
        mem->used += nslots;

        spin_unlock_irqrestore(&mem->lock, flags);
        return index;
}

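/*
 * Allocate a bounce buffer of @alloc_size bytes for @orig_addr, record the
 * original address in the slot metadata and, for DMA_TO_DEVICE or
 * DMA_BIDIRECTIONAL transfers, copy the data into the bounce buffer.
 */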
phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
                size_t mapping_size, size_t alloc_size,
                enum dma_data_direction dir, unsigned long attrs)
{
        struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
        unsigned int offset = swiotlb_align_offset(dev, orig_addr);
        unsigned int i;
        int index;
        phys_addr_t tlb_addr;

        if (!mem)
                panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");

        if (mem_encrypt_active())
                pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n");

        if (mapping_size > alloc_size) {
                dev_warn_once(dev, "Invalid sizes (mapping: %zd bytes, alloc: %zd bytes)",
                              mapping_size, alloc_size);
                return (phys_addr_t)DMA_MAPPING_ERROR;
        }

        index = swiotlb_find_slots(dev, orig_addr, alloc_size + offset);
        if (index == -1) {
                if (!(attrs & DMA_ATTR_NO_WARN))
                        dev_warn_ratelimited(dev,
        "swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
                                 alloc_size, mem->nslabs, mem->used);
                return (phys_addr_t)DMA_MAPPING_ERROR;
        }

        /*
         * Save away the mapping from the original address to the DMA address.
         * This is needed when we sync the memory.  Then we sync the buffer if
         * needed.
         */
        for (i = 0; i < nr_slots(alloc_size + offset); i++)
                mem->slots[index + i].orig_addr = slot_addr(orig_addr, i);
        tlb_addr = slot_addr(mem->start, index) + offset;
        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
            (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
                swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE);
        return tlb_addr;
}

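/*
 * Return a bounce buffer's slots to the free list and merge them with any
 * adjacent free slots of the same segment.
 */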
static void swiotlb_release_slots(struct device *dev, phys_addr_t tlb_addr)
{
        struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
        unsigned long flags;
        unsigned int offset = swiotlb_align_offset(dev, tlb_addr);
        int index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT;
        int nslots = nr_slots(mem->slots[index].alloc_size + offset);
        int count, i;

        /*
         * Return the buffer to the free list by setting the corresponding
         * entries to indicate the number of contiguous entries available.
         * While returning the entries to the free list, we merge the entries
         * with slots below and above the pool being returned.
         */
        spin_lock_irqsave(&mem->lock, flags);
        if (index + nslots < ALIGN(index + 1, IO_TLB_SEGSIZE))
                count = mem->slots[index + nslots].list;
        else
                count = 0;

        /*
         * Step 1: return the slots to the free list, merging the slots with
         * succeeding slots
         */
        for (i = index + nslots - 1; i >= index; i--) {
                mem->slots[i].list = ++count;
                mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
                mem->slots[i].alloc_size = 0;
        }

        /*
         * Step 2: merge the returned slots with the preceding slots, if
         * available (non-zero)
         */
        for (i = index - 1;
             io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && mem->slots[i].list;
             i--)
                mem->slots[i].list = ++count;
        mem->used -= nslots;
        spin_unlock_irqrestore(&mem->lock, flags);
}

/*
 * tlb_addr is the physical address of the bounce buffer to unmap.
 */
void swiotlb_tbl_unmap_single(struct device *dev, phys_addr_t tlb_addr,
                              size_t mapping_size, enum dma_data_direction dir,
                              unsigned long attrs)
{
        /*
         * First, sync the memory before unmapping the entry
         */
        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
            (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
                swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_FROM_DEVICE);

        swiotlb_release_slots(dev, tlb_addr);
}

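/*
 * Sync helpers: copy data into the bounce buffer before the device reads it
 * and back to the original buffer after the device has written it.
 */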
void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
                size_t size, enum dma_data_direction dir)
{
        if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
                swiotlb_bounce(dev, tlb_addr, size, DMA_TO_DEVICE);
        else
                BUG_ON(dir != DMA_FROM_DEVICE);
}

void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
                size_t size, enum dma_data_direction dir)
{
        if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
                swiotlb_bounce(dev, tlb_addr, size, DMA_FROM_DEVICE);
        else
                BUG_ON(dir != DMA_TO_DEVICE);
}

/*
 * Create a swiotlb mapping for the buffer at @paddr, and in case of DMAing
 * to the device copy the data into it as well.
 */
dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
                enum dma_data_direction dir, unsigned long attrs)
{
        phys_addr_t swiotlb_addr;
        dma_addr_t dma_addr;

        trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size,
                              swiotlb_force);

        swiotlb_addr = swiotlb_tbl_map_single(dev, paddr, size, size, dir,
                        attrs);
        if (swiotlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
                return DMA_MAPPING_ERROR;

        /* Ensure that the address returned is DMA'ble */
        dma_addr = phys_to_dma_unencrypted(dev, swiotlb_addr);
        if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
                swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, dir,
                        attrs | DMA_ATTR_SKIP_CPU_SYNC);
                dev_WARN_ONCE(dev, 1,
                        "swiotlb addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
                        &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
                return DMA_MAPPING_ERROR;
        }

        if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                arch_sync_dma_for_device(swiotlb_addr, size, dir);
        return dma_addr;
}

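/* The largest mapping a single swiotlb allocation can cover: one full segment. */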
size_t swiotlb_max_mapping_size(struct device *dev)
{
        return ((size_t)IO_TLB_SIZE) * IO_TLB_SEGSIZE;
}

bool is_swiotlb_active(struct device *dev)
{
        return dev->dma_io_tlb_mem != NULL;
}
EXPORT_SYMBOL_GPL(is_swiotlb_active);

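/* Expose per-pool statistics (io_tlb_nslabs, io_tlb_used) via debugfs. */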
#ifdef CONFIG_DEBUG_FS
static struct dentry *debugfs_dir;

static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem)
{
        debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs);
        debugfs_create_ulong("io_tlb_used", 0400, mem->debugfs, &mem->used);
}

static int __init swiotlb_create_default_debugfs(void)
{
        struct io_tlb_mem *mem = io_tlb_default_mem;

        debugfs_dir = debugfs_create_dir("swiotlb", NULL);
        if (mem) {
                mem->debugfs = debugfs_dir;
                swiotlb_create_debugfs_files(mem);
        }
        return 0;
}

late_initcall(swiotlb_create_default_debugfs);

#endif

#ifdef CONFIG_DMA_RESTRICTED_POOL
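/*
 * Allocate pages from a device's restricted DMA pool by claiming contiguous
 * IO TLB slots; swiotlb_free() below returns them.
 */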
struct page *swiotlb_alloc(struct device *dev, size_t size)
{
        struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
        phys_addr_t tlb_addr;
        int index;

        if (!mem)
                return NULL;

        index = swiotlb_find_slots(dev, 0, size);
        if (index == -1)
                return NULL;

        tlb_addr = slot_addr(mem->start, index);

        return pfn_to_page(PFN_DOWN(tlb_addr));
}

bool swiotlb_free(struct device *dev, struct page *page, size_t size)
{
        phys_addr_t tlb_addr = page_to_phys(page);

        if (!is_swiotlb_buffer(dev, tlb_addr))
                return false;

        swiotlb_release_slots(dev, tlb_addr);

        return true;
}

#endif /* CONFIG_DMA_RESTRICTED_POOL */