// SPDX-License-Identifier: GPL-2.0-only
/*
 * Dynamic DMA mapping support.
 *
 * This implementation is a fallback for platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
 * Copyright (C) 2000, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
 * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
 *			unnecessary i-cache flushing.
 * 04/07/.. ak		Better overflow handling. Assorted fixes.
 * 05/09/10 linville	Add support for syncing ranges, support syncing for
 *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
 * 08/12/11 beckyb	Add highmem support
 */

#define pr_fmt(fmt) "software IO TLB: " fmt

#include <linux/cache.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/swiotlb.h>
#include <linux/pfn.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/cc_platform.h>
#include <linux/set_memory.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#endif
#ifdef CONFIG_DMA_RESTRICTED_POOL
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>
#include <linux/slab.h>
#endif

#include <asm/io.h>
#include <asm/dma.h>

#include <linux/io.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/iommu-helper.h>

#define CREATE_TRACE_POINTS
#include <trace/events/swiotlb.h>

#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))

/*
 * Minimum IO TLB size to bother booting with.  Systems with mainly
 * 64bit capable cards will only lightly use the swiotlb.  If we can't
 * allocate a contiguous 1MB, we're probably in trouble anyway.
 */
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)

#define INVALID_PHYS_ADDR (~(phys_addr_t)0)

enum swiotlb_force swiotlb_force;

struct io_tlb_mem io_tlb_default_mem;

phys_addr_t swiotlb_unencrypted_base;

/*
 * Max segment that we can provide which (if pages are contiguous) will
 * not be bounced (unless SWIOTLB_FORCE is set).
 */
static unsigned int max_segment;

static unsigned long default_nslabs = IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT;

static int __init
setup_io_tlb_npages(char *str)
{
	if (isdigit(*str)) {
		/* avoid tail segment of size < IO_TLB_SEGSIZE */
		default_nslabs =
			ALIGN(simple_strtoul(str, &str, 0), IO_TLB_SEGSIZE);
	}
	if (*str == ',')
		++str;
	if (!strcmp(str, "force"))
		swiotlb_force = SWIOTLB_FORCE;
	else if (!strcmp(str, "noforce"))
		swiotlb_force = SWIOTLB_NO_FORCE;

	return 0;
}
early_param("swiotlb", setup_io_tlb_npages);

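/*
 * Example (illustrative command line, assuming the default 2 KB slab
 * size, i.e. IO_TLB_SHIFT == 11): booting with "swiotlb=65536,force"
 * asks for 65536 slabs (128 MB of bounce buffer; the count is aligned
 * up to a multiple of IO_TLB_SEGSIZE) and forces all streaming DMA
 * mappings through the bounce buffer, while "swiotlb=noforce" makes
 * the init paths below bail out without allocating a pool at all.
 */
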
unsigned int swiotlb_max_segment(void)
{
	return io_tlb_default_mem.nslabs ? max_segment : 0;
}
EXPORT_SYMBOL_GPL(swiotlb_max_segment);

void swiotlb_set_max_segment(unsigned int val)
{
	if (swiotlb_force == SWIOTLB_FORCE)
		max_segment = 1;
	else
		max_segment = rounddown(val, PAGE_SIZE);
}

unsigned long swiotlb_size_or_default(void)
{
	return default_nslabs << IO_TLB_SHIFT;
}

void __init swiotlb_adjust_size(unsigned long size)
{
	/*
	 * If swiotlb parameter has not been specified, give a chance to
	 * architectures such as those supporting memory encryption to
	 * adjust/expand SWIOTLB size for their use.
	 */
	if (default_nslabs != IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT)
		return;
	size = ALIGN(size, IO_TLB_SIZE);
	default_nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
	pr_info("SWIOTLB bounce buffer size adjusted to %luMB", size >> 20);
}

void swiotlb_print_info(void)
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;

	if (!mem->nslabs) {
		pr_warn("No low mem\n");
		return;
	}

	pr_info("mapped [mem %pa-%pa] (%luMB)\n", &mem->start, &mem->end,
		(mem->nslabs << IO_TLB_SHIFT) >> 20);
}

static inline unsigned long io_tlb_offset(unsigned long val)
{
	return val & (IO_TLB_SEGSIZE - 1);
}

static inline unsigned long nr_slots(u64 val)
{
	return DIV_ROUND_UP(val, IO_TLB_SIZE);
}
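
/*
 * Worked example (assuming the default IO_TLB_SHIFT of 11, i.e. 2 KB
 * slots, and IO_TLB_SEGSIZE of 128): a 9000 byte request needs
 * nr_slots(9000) = DIV_ROUND_UP(9000, 2048) = 5 slots, and the slot
 * with global index 300 sits at offset io_tlb_offset(300) = 300 & 127
 * = 44 within its 128-slot segment.
 */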

/*
 * Remap swiotlb memory in the unencrypted physical address space when
 * swiotlb_unencrypted_base is set (e.g. for Hyper-V AMD SEV-SNP
 * Isolation VMs).
 */
#ifdef CONFIG_HAS_IOMEM
static void *swiotlb_mem_remap(struct io_tlb_mem *mem, unsigned long bytes)
{
	void *vaddr = NULL;

	if (swiotlb_unencrypted_base) {
		phys_addr_t paddr = mem->start + swiotlb_unencrypted_base;

		vaddr = memremap(paddr, bytes, MEMREMAP_WB);
		if (!vaddr)
			pr_err("Failed to map the unencrypted memory %pa size %lx.\n",
			       &paddr, bytes);
	}

	return vaddr;
}
#else
static void *swiotlb_mem_remap(struct io_tlb_mem *mem, unsigned long bytes)
{
	return NULL;
}
#endif

/*
 * Early SWIOTLB allocation may be too early to allow an architecture to
 * perform the desired operations.  This function allows the architecture to
 * call SWIOTLB when the operations are possible.  It needs to be called
 * before the SWIOTLB memory is used.
 */
void __init swiotlb_update_mem_attributes(void)
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;
	void *vaddr;
	unsigned long bytes;

	if (!mem->nslabs || mem->late_alloc)
		return;
	vaddr = phys_to_virt(mem->start);
	bytes = PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT);
	set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);

	mem->vaddr = swiotlb_mem_remap(mem, bytes);
	if (!mem->vaddr)
		mem->vaddr = vaddr;

	memset(mem->vaddr, 0, bytes);
}

static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
				    unsigned long nslabs, bool late_alloc)
{
	void *vaddr = phys_to_virt(start);
	unsigned long bytes = nslabs << IO_TLB_SHIFT, i;

	mem->nslabs = nslabs;
	mem->start = start;
	mem->end = mem->start + bytes;
	mem->index = 0;
	mem->late_alloc = late_alloc;

	if (swiotlb_force == SWIOTLB_FORCE)
		mem->force_bounce = true;

	spin_lock_init(&mem->lock);
	for (i = 0; i < mem->nslabs; i++) {
		mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i);
		mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
		mem->slots[i].alloc_size = 0;
	}

	/*
	 * If swiotlb_unencrypted_base is set, the bounce buffer memory will
	 * be remapped and cleared in swiotlb_update_mem_attributes.
	 */
	if (swiotlb_unencrypted_base)
		return;

	memset(vaddr, 0, bytes);
	mem->vaddr = vaddr;
	return;
}
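
/*
 * Illustration of the free list set up above (IO_TLB_SEGSIZE == 128):
 * within each 128-slot segment, slot 0 starts with list == 128, slot 1
 * with 127, ... and slot 127 with 1, i.e. each entry records how many
 * contiguous free slots remain up to the end of its segment.  The
 * allocator below relies on this to find runs of free slots quickly.
 */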

int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;
	size_t alloc_size;

	if (swiotlb_force == SWIOTLB_NO_FORCE)
		return 0;

	/* protect against double initialization */
	if (WARN_ON_ONCE(mem->nslabs))
		return -ENOMEM;

	alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs));
	mem->slots = memblock_alloc(alloc_size, PAGE_SIZE);
	if (!mem->slots)
		panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
		      __func__, alloc_size, PAGE_SIZE);

	swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, false);

	if (verbose)
		swiotlb_print_info();
	swiotlb_set_max_segment(mem->nslabs << IO_TLB_SHIFT);
	return 0;
}

/*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the DMA API.
 */
void __init
swiotlb_init(int verbose)
{
	size_t bytes = PAGE_ALIGN(default_nslabs << IO_TLB_SHIFT);
	void *tlb;

	if (swiotlb_force == SWIOTLB_NO_FORCE)
		return;

	/* Get IO TLB memory from the low pages */
	tlb = memblock_alloc_low(bytes, PAGE_SIZE);
	if (!tlb)
		goto fail;
	if (swiotlb_init_with_tbl(tlb, default_nslabs, verbose))
		goto fail_free_mem;
	return;

fail_free_mem:
	memblock_free(tlb, bytes);
fail:
	pr_warn("Cannot allocate buffer");
}

/*
 * Systems with larger DMA zones (those that don't support ISA) can
 * initialize the swiotlb later using the slab allocator if needed.
 * This should be just like above, but with some error catching.
 */
int
swiotlb_late_init_with_default_size(size_t default_size)
{
	unsigned long nslabs =
		ALIGN(default_size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
	unsigned long bytes;
	unsigned char *vstart = NULL;
	unsigned int order;
	int rc = 0;

	if (swiotlb_force == SWIOTLB_NO_FORCE)
		return 0;

	/*
	 * Get IO TLB memory from the low pages
	 */
	order = get_order(nslabs << IO_TLB_SHIFT);
	nslabs = SLABS_PER_PAGE << order;
	bytes = nslabs << IO_TLB_SHIFT;

	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
		vstart = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
						  order);
		if (vstart)
			break;
		order--;
	}

	if (!vstart)
		return -ENOMEM;

	if (order != get_order(bytes)) {
		pr_warn("only able to allocate %ld MB\n",
			(PAGE_SIZE << order) >> 20);
		nslabs = SLABS_PER_PAGE << order;
	}
	rc = swiotlb_late_init_with_tbl(vstart, nslabs);
	if (rc)
		free_pages((unsigned long)vstart, order);

	return rc;
}

int
swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;
	unsigned long bytes = nslabs << IO_TLB_SHIFT;

	if (swiotlb_force == SWIOTLB_NO_FORCE)
		return 0;

	/* protect against double initialization */
	if (WARN_ON_ONCE(mem->nslabs))
		return -ENOMEM;

	mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
		get_order(array_size(sizeof(*mem->slots), nslabs)));
	if (!mem->slots)
		return -ENOMEM;

	set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT);
	swiotlb_init_io_tlb_mem(mem, virt_to_phys(tlb), nslabs, true);

	swiotlb_print_info();
	swiotlb_set_max_segment(mem->nslabs << IO_TLB_SHIFT);
	return 0;
}

void __init swiotlb_exit(void)
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;
	unsigned long tbl_vaddr;
	size_t tbl_size, slots_size;

	if (!mem->nslabs)
		return;

	pr_info("tearing down default memory pool\n");
	tbl_vaddr = (unsigned long)phys_to_virt(mem->start);
	tbl_size = PAGE_ALIGN(mem->end - mem->start);
	slots_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), mem->nslabs));

	set_memory_encrypted(tbl_vaddr, tbl_size >> PAGE_SHIFT);
	if (mem->late_alloc) {
		free_pages(tbl_vaddr, get_order(tbl_size));
		free_pages((unsigned long)mem->slots, get_order(slots_size));
	} else {
		memblock_free_late(mem->start, tbl_size);
		memblock_free_late(__pa(mem->slots), slots_size);
	}

	memset(mem, 0, sizeof(*mem));
}

/*
 * Return the offset into an IO TLB slot required to keep the device happy.
 */
static unsigned int swiotlb_align_offset(struct device *dev, u64 addr)
{
	return addr & dma_get_min_align_mask(dev) & (IO_TLB_SIZE - 1);
}
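
/*
 * Example (hypothetical device): with a min_align_mask of 0xfff (the
 * device expects the low 12 address bits to be preserved) and 2 KB
 * slots, mapping a buffer at physical address 0x12345678 yields
 * swiotlb_align_offset() == 0x678 & 0x7ff == 0x678, so the bounce
 * buffer copy starts at that offset inside the first allocated slot.
 */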

/*
 * Bounce: copy the swiotlb buffer from or back to the original dma location
 */
static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size,
			   enum dma_data_direction dir)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	int index = (tlb_addr - mem->start) >> IO_TLB_SHIFT;
	phys_addr_t orig_addr = mem->slots[index].orig_addr;
	size_t alloc_size = mem->slots[index].alloc_size;
	unsigned long pfn = PFN_DOWN(orig_addr);
	unsigned char *vaddr = mem->vaddr + tlb_addr - mem->start;
	unsigned int tlb_offset, orig_addr_offset;

	if (orig_addr == INVALID_PHYS_ADDR)
		return;

	tlb_offset = tlb_addr & (IO_TLB_SIZE - 1);
	orig_addr_offset = swiotlb_align_offset(dev, orig_addr);
	if (tlb_offset < orig_addr_offset) {
		dev_WARN_ONCE(dev, 1,
			"Access before mapping start detected. orig offset %u, requested offset %u.\n",
			orig_addr_offset, tlb_offset);
		return;
	}

	tlb_offset -= orig_addr_offset;
	if (tlb_offset > alloc_size) {
		dev_WARN_ONCE(dev, 1,
			"Buffer overflow detected. Allocation size: %zu. Mapping size: %zu+%u.\n",
			alloc_size, size, tlb_offset);
		return;
	}

	orig_addr += tlb_offset;
	alloc_size -= tlb_offset;

	if (size > alloc_size) {
		dev_WARN_ONCE(dev, 1,
			"Buffer overflow detected. Allocation size: %zu. Mapping size: %zu.\n",
			alloc_size, size);
		size = alloc_size;
	}

	if (PageHighMem(pfn_to_page(pfn))) {
		/* The buffer does not have a mapping.  Map it in and copy */
		unsigned int offset = orig_addr & ~PAGE_MASK;
		char *buffer;
		unsigned int sz = 0;
		unsigned long flags;

		while (size) {
			sz = min_t(size_t, PAGE_SIZE - offset, size);

			local_irq_save(flags);
			buffer = kmap_atomic(pfn_to_page(pfn));
			if (dir == DMA_TO_DEVICE)
				memcpy(vaddr, buffer + offset, sz);
			else
				memcpy(buffer + offset, vaddr, sz);
			kunmap_atomic(buffer);
			local_irq_restore(flags);

			size -= sz;
			pfn++;
			vaddr += sz;
			offset = 0;
		}
	} else if (dir == DMA_TO_DEVICE) {
		memcpy(vaddr, phys_to_virt(orig_addr), size);
	} else {
		memcpy(phys_to_virt(orig_addr), vaddr, size);
	}
}

#define slot_addr(start, idx)	((start) + ((idx) << IO_TLB_SHIFT))

/*
 * Carefully handle integer overflow which can occur when boundary_mask == ~0UL.
 */
static inline unsigned long get_max_slots(unsigned long boundary_mask)
{
	if (boundary_mask == ~0UL)
		return 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
	return nr_slots(boundary_mask + 1);
}
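
/*
 * Example: for a device with a 4 GB segment boundary (boundary_mask ==
 * 0xffffffff) and 2 KB slots, get_max_slots() returns
 * nr_slots(0x100000000) == 0x200000, i.e. a single mapping may span at
 * most 2M slots before it would cross the device's boundary.
 */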

static unsigned int wrap_index(struct io_tlb_mem *mem, unsigned int index)
{
	if (index >= mem->nslabs)
		return 0;
	return index;
}

/*
 * Find a suitable number of IO TLB entries that will fit this request
 * and allocate a buffer from that IO TLB pool.
 */
static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
		size_t alloc_size, unsigned int alloc_align_mask)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	unsigned long boundary_mask = dma_get_seg_boundary(dev);
	dma_addr_t tbl_dma_addr =
		phys_to_dma_unencrypted(dev, mem->start) & boundary_mask;
	unsigned long max_slots = get_max_slots(boundary_mask);
	unsigned int iotlb_align_mask =
		dma_get_min_align_mask(dev) & ~(IO_TLB_SIZE - 1);
	unsigned int nslots = nr_slots(alloc_size), stride;
	unsigned int index, wrap, count = 0, i;
	unsigned int offset = swiotlb_align_offset(dev, orig_addr);
	unsigned long flags;

	BUG_ON(!nslots);

	/*
	 * For mappings with an alignment requirement don't bother looping to
	 * unaligned slots once we found an aligned one.  For allocations of
	 * PAGE_SIZE or larger only look for page aligned allocations.
	 */
	stride = (iotlb_align_mask >> IO_TLB_SHIFT) + 1;
	if (alloc_size >= PAGE_SIZE)
		stride = max(stride, stride << (PAGE_SHIFT - IO_TLB_SHIFT));
	stride = max(stride, (alloc_align_mask >> IO_TLB_SHIFT) + 1);

	spin_lock_irqsave(&mem->lock, flags);
	if (unlikely(nslots > mem->nslabs - mem->used))
		goto not_found;

	index = wrap = wrap_index(mem, ALIGN(mem->index, stride));
	do {
		if (orig_addr &&
		    (slot_addr(tbl_dma_addr, index) & iotlb_align_mask) !=
			    (orig_addr & iotlb_align_mask)) {
			index = wrap_index(mem, index + 1);
			continue;
		}

		/*
		 * If we find a slot that indicates we have 'nslots' number of
		 * contiguous buffers, we allocate the buffers from that slot
		 * and mark the entries as '0' indicating unavailable.
		 */
		if (!iommu_is_span_boundary(index, nslots,
					    nr_slots(tbl_dma_addr),
					    max_slots)) {
			if (mem->slots[index].list >= nslots)
				goto found;
		}
		index = wrap_index(mem, index + stride);
	} while (index != wrap);

not_found:
	spin_unlock_irqrestore(&mem->lock, flags);
	return -1;

found:
	for (i = index; i < index + nslots; i++) {
		mem->slots[i].list = 0;
		mem->slots[i].alloc_size =
			alloc_size - (offset + ((i - index) << IO_TLB_SHIFT));
	}
	for (i = index - 1;
	     io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 &&
	     mem->slots[i].list; i--)
		mem->slots[i].list = ++count;

	/*
	 * Update the indices to avoid searching in the next round.
	 */
	if (index + nslots < mem->nslabs)
		mem->index = index + nslots;
	else
		mem->index = 0;
	mem->used += nslots;

	spin_unlock_irqrestore(&mem->lock, flags);
	return index;
}

phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
		size_t mapping_size, size_t alloc_size,
		unsigned int alloc_align_mask, enum dma_data_direction dir,
		unsigned long attrs)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	unsigned int offset = swiotlb_align_offset(dev, orig_addr);
	unsigned int i;
	int index;
	phys_addr_t tlb_addr;

	if (!mem)
		panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");

	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n");

	if (mapping_size > alloc_size) {
		dev_warn_once(dev, "Invalid sizes (mapping: %zd bytes, alloc: %zd bytes)",
			      mapping_size, alloc_size);
		return (phys_addr_t)DMA_MAPPING_ERROR;
	}

	index = swiotlb_find_slots(dev, orig_addr,
				   alloc_size + offset, alloc_align_mask);
	if (index == -1) {
		if (!(attrs & DMA_ATTR_NO_WARN))
			dev_warn_ratelimited(dev,
	"swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
				 alloc_size, mem->nslabs, mem->used);
		return (phys_addr_t)DMA_MAPPING_ERROR;
	}

	/*
	 * Save away the mapping from the original address to the DMA address.
	 * This is needed when we sync the memory.  Then we sync the buffer if
	 * needed.
	 */
	for (i = 0; i < nr_slots(alloc_size + offset); i++)
		mem->slots[index + i].orig_addr = slot_addr(orig_addr, i);
	tlb_addr = slot_addr(mem->start, index) + offset;
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
		swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE);
	return tlb_addr;
}
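
/*
 * Typical call path (sketch): a driver's dma_map_single()/dma_map_page()
 * call reaches swiotlb_tbl_map_single() via swiotlb_map() below when the
 * buffer is not directly addressable by the device or bouncing is forced;
 * IOMMU code may also call it directly for untrusted devices.  The caller
 * then converts the returned physical tlb_addr into a bus address before
 * handing it to the device.
 */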

static void swiotlb_release_slots(struct device *dev, phys_addr_t tlb_addr)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	unsigned long flags;
	unsigned int offset = swiotlb_align_offset(dev, tlb_addr);
	int index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT;
	int nslots = nr_slots(mem->slots[index].alloc_size + offset);
	int count, i;

	/*
	 * Return the buffer to the free list by setting the corresponding
	 * entries to indicate the number of contiguous entries available.
	 * While returning the entries to the free list, we merge the entries
	 * with slots below and above the pool being returned.
	 */
	spin_lock_irqsave(&mem->lock, flags);
	if (index + nslots < ALIGN(index + 1, IO_TLB_SEGSIZE))
		count = mem->slots[index + nslots].list;
	else
		count = 0;

	/*
	 * Step 1: return the slots to the free list, merging the slots with
	 * succeeding slots
	 */
	for (i = index + nslots - 1; i >= index; i--) {
		mem->slots[i].list = ++count;
		mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
		mem->slots[i].alloc_size = 0;
	}

	/*
	 * Step 2: merge the returned slots with the preceding slots, if
	 * available (non zero)
	 */
	for (i = index - 1;
	     io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && mem->slots[i].list;
	     i--)
		mem->slots[i].list = ++count;
	mem->used -= nslots;
	spin_unlock_irqrestore(&mem->lock, flags);
}

/*
 * tlb_addr is the physical address of the bounce buffer to unmap.
 */
void swiotlb_tbl_unmap_single(struct device *dev, phys_addr_t tlb_addr,
			      size_t mapping_size, enum dma_data_direction dir,
			      unsigned long attrs)
{
	/*
	 * First, sync the memory before unmapping the entry
	 */
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
		swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_FROM_DEVICE);

	swiotlb_release_slots(dev, tlb_addr);
}

void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
		size_t size, enum dma_data_direction dir)
{
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
		swiotlb_bounce(dev, tlb_addr, size, DMA_TO_DEVICE);
	else
		BUG_ON(dir != DMA_FROM_DEVICE);
}

void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
		size_t size, enum dma_data_direction dir)
{
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		swiotlb_bounce(dev, tlb_addr, size, DMA_FROM_DEVICE);
	else
		BUG_ON(dir != DMA_TO_DEVICE);
}

/*
 * Create a swiotlb mapping for the buffer at @paddr, and in case of DMAing
 * to the device copy the data into it as well.
 */
dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t swiotlb_addr;
	dma_addr_t dma_addr;

	trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size,
			      swiotlb_force);

	swiotlb_addr = swiotlb_tbl_map_single(dev, paddr, size, size, 0, dir,
			attrs);
	if (swiotlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
		return DMA_MAPPING_ERROR;

	/* Ensure that the address returned is DMA'ble */
	dma_addr = phys_to_dma_unencrypted(dev, swiotlb_addr);
	if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
		swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, dir,
			attrs | DMA_ATTR_SKIP_CPU_SYNC);
		dev_WARN_ONCE(dev, 1,
			"swiotlb addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
			&dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
		return DMA_MAPPING_ERROR;
	}

	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(swiotlb_addr, size, dir);
	return dma_addr;
}

size_t swiotlb_max_mapping_size(struct device *dev)
{
	return ((size_t)IO_TLB_SIZE) * IO_TLB_SEGSIZE;
}
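
/*
 * With the default 2 KB IO_TLB_SIZE and IO_TLB_SEGSIZE of 128 this
 * evaluates to 256 KB: a single streaming mapping never uses more than
 * one segment of the bounce buffer.
 */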

bool is_swiotlb_active(struct device *dev)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;

	return mem && mem->nslabs;
}
EXPORT_SYMBOL_GPL(is_swiotlb_active);

#ifdef CONFIG_DEBUG_FS
static struct dentry *debugfs_dir;

static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem)
{
	debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs);
	debugfs_create_ulong("io_tlb_used", 0400, mem->debugfs, &mem->used);
}

static int __init swiotlb_create_default_debugfs(void)
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;

	debugfs_dir = debugfs_create_dir("swiotlb", NULL);
	if (mem->nslabs) {
		mem->debugfs = debugfs_dir;
		swiotlb_create_debugfs_files(mem);
	}
	return 0;
}

late_initcall(swiotlb_create_default_debugfs);

#endif

#ifdef CONFIG_DMA_RESTRICTED_POOL

#ifdef CONFIG_DEBUG_FS
static void rmem_swiotlb_debugfs_init(struct reserved_mem *rmem)
{
	struct io_tlb_mem *mem = rmem->priv;

	mem->debugfs = debugfs_create_dir(rmem->name, debugfs_dir);
	swiotlb_create_debugfs_files(mem);
}
#else
static void rmem_swiotlb_debugfs_init(struct reserved_mem *rmem)
{
}
#endif

struct page *swiotlb_alloc(struct device *dev, size_t size)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	phys_addr_t tlb_addr;
	int index;

	if (!mem)
		return NULL;

	index = swiotlb_find_slots(dev, 0, size, 0);
	if (index == -1)
		return NULL;

	tlb_addr = slot_addr(mem->start, index);

	return pfn_to_page(PFN_DOWN(tlb_addr));
}

bool swiotlb_free(struct device *dev, struct page *page, size_t size)
{
	phys_addr_t tlb_addr = page_to_phys(page);

	if (!is_swiotlb_buffer(dev, tlb_addr))
		return false;

	swiotlb_release_slots(dev, tlb_addr);

	return true;
}

static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
				    struct device *dev)
{
	struct io_tlb_mem *mem = rmem->priv;
	unsigned long nslabs = rmem->size >> IO_TLB_SHIFT;

	/*
	 * Since multiple devices can share the same pool, the private data,
	 * io_tlb_mem struct, will be initialized by the first device attached
	 * to it.
	 */
	if (!mem) {
		mem = kzalloc(sizeof(*mem), GFP_KERNEL);
		if (!mem)
			return -ENOMEM;

		mem->slots = kzalloc(array_size(sizeof(*mem->slots), nslabs),
				     GFP_KERNEL);
		if (!mem->slots) {
			kfree(mem);
			return -ENOMEM;
		}

		set_memory_decrypted((unsigned long)phys_to_virt(rmem->base),
				     rmem->size >> PAGE_SHIFT);
		swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, false);
		mem->force_bounce = true;
		mem->for_alloc = true;

		rmem->priv = mem;

		rmem_swiotlb_debugfs_init(rmem);
	}

	dev->dma_io_tlb_mem = mem;

	return 0;
}

static void rmem_swiotlb_device_release(struct reserved_mem *rmem,
					struct device *dev)
{
	dev->dma_io_tlb_mem = &io_tlb_default_mem;
}

static const struct reserved_mem_ops rmem_swiotlb_ops = {
	.device_init = rmem_swiotlb_device_init,
	.device_release = rmem_swiotlb_device_release,
};

static int __init rmem_swiotlb_setup(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;

	if (of_get_flat_dt_prop(node, "reusable", NULL) ||
	    of_get_flat_dt_prop(node, "linux,cma-default", NULL) ||
	    of_get_flat_dt_prop(node, "linux,dma-default", NULL) ||
	    of_get_flat_dt_prop(node, "no-map", NULL))
		return -EINVAL;

	if (PageHighMem(pfn_to_page(PHYS_PFN(rmem->base)))) {
		pr_err("Restricted DMA pool must be accessible within the linear mapping.");
		return -EINVAL;
	}

	rmem->ops = &rmem_swiotlb_ops;
	pr_info("Reserved memory: created restricted DMA pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);
	return 0;
}

RESERVEDMEM_OF_DECLARE(dma, "restricted-dma-pool", rmem_swiotlb_setup);
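
/*
 * Illustrative (hypothetical) device tree snippet for such a pool; the
 * node name, address and size below are made up:
 *
 *	reserved-memory {
 *		restricted_dma: restricted-dma@50000000 {
 *			compatible = "restricted-dma-pool";
 *			reg = <0x50000000 0x400000>;
 *		};
 *	};
 *
 * A device that lists this node in its "memory-region" property gets
 * its dma_io_tlb_mem pointed at the pool by rmem_swiotlb_device_init()
 * above, so all of its DMA is bounced through (and allocated from)
 * that memory.
 */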
#endif /* CONFIG_DMA_RESTRICTED_POOL */