// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2020 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-map-ops.h>
#include <linux/scatterlist.h>
#include <linux/pfn.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include "direct.h"

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but some use
 * it for entirely different regions.  In that case the arch code needs to
 * override the variable below for dma-direct to work properly.
 */
unsigned int zone_dma_bits __ro_after_init = 24;

static inline dma_addr_t phys_to_dma_direct(struct device *dev,
                phys_addr_t phys)
{
        if (force_dma_unencrypted(dev))
                return phys_to_dma_unencrypted(dev, phys);
        return phys_to_dma(dev, phys);
}

static inline struct page *dma_direct_to_page(struct device *dev,
                dma_addr_t dma_addr)
{
        return pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_addr)));
}

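/*
 * Report the smallest all-ones mask that still covers the highest directly
 * mapped physical page, i.e. the DMA mask a device needs to address all of
 * memory through the direct mapping.
 */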
u64 dma_direct_get_required_mask(struct device *dev)
{
        phys_addr_t phys = (phys_addr_t)(max_pfn - 1) << PAGE_SHIFT;
        u64 max_dma = phys_to_dma_direct(dev, phys);

        return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}

static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
                u64 *phys_limit)
{
        u64 dma_limit = min_not_zero(dma_mask, dev->bus_dma_limit);

        /*
         * Optimistically try the zone that the physical address mask falls
         * into first.  If that returns memory that isn't actually addressable
         * we will fall back to the next lower zone and try again.
         *
         * Note that GFP_DMA32 and GFP_DMA are no-ops without the
         * corresponding zones.
         */
        *phys_limit = dma_to_phys(dev, dma_limit);
        if (*phys_limit <= DMA_BIT_MASK(zone_dma_bits))
                return GFP_DMA;
        if (*phys_limit <= DMA_BIT_MASK(32))
                return GFP_DMA32;
        return 0;
}

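/*
 * Check that an allocation at @phys is usable by the device: the mapped DMA
 * address must fit within both the coherent DMA mask and any bus_dma_limit.
 */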
static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
        dma_addr_t dma_addr = phys_to_dma_direct(dev, phys);

        if (dma_addr == DMA_MAPPING_ERROR)
                return false;
        return dma_addr + size - 1 <=
                min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
}

/*
 * Decrypting memory is allowed to block, so if this device requires
 * unencrypted memory it must come from atomic pools.
 */
static inline bool dma_should_alloc_from_pool(struct device *dev, gfp_t gfp,
                unsigned long attrs)
{
        if (!IS_ENABLED(CONFIG_DMA_COHERENT_POOL))
                return false;
        if (gfpflags_allow_blocking(gfp))
                return false;
        if (force_dma_unencrypted(dev))
                return true;
        if (!IS_ENABLED(CONFIG_DMA_DIRECT_REMAP))
                return false;
        if (dma_alloc_need_uncached(dev, attrs))
                return true;
        return false;
}

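/*
 * The converse of the above: the buffer might have come from the atomic
 * pools, so it has to be offered to dma_free_from_pool() before being freed
 * through the regular path.
 */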
static inline bool dma_should_free_from_pool(struct device *dev,
                unsigned long attrs)
{
        if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL))
                return true;
        if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
            !force_dma_unencrypted(dev))
                return false;
        if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP))
                return true;
        return false;
}

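/*
 * Grab pages for a direct-mapped allocation: prefer contiguous/CMA memory,
 * fall back to the page allocator, and retry in progressively more
 * restrictive GFP zones until the result is actually addressable.
 */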
static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
                gfp_t gfp)
{
        int node = dev_to_node(dev);
        struct page *page = NULL;
        u64 phys_limit;

        WARN_ON_ONCE(!PAGE_ALIGNED(size));

        gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
                                           &phys_limit);
        page = dma_alloc_contiguous(dev, size, gfp);
        if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
                dma_free_contiguous(dev, page, size);
                page = NULL;
        }
again:
        if (!page)
                page = alloc_pages_node(node, gfp, get_order(size));
        if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
                dma_free_contiguous(dev, page, size);
                page = NULL;

                if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
                    phys_limit < DMA_BIT_MASK(64) &&
                    !(gfp & (GFP_DMA32 | GFP_DMA))) {
                        gfp |= GFP_DMA32;
                        goto again;
                }

                if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
                        gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
                        goto again;
                }
        }

        return page;
}

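/*
 * Coherent allocation backend for the direct mapping.  Depending on the
 * device and attrs the buffer may come from the atomic pools, be returned
 * as an opaque struct page cookie, be remapped uncached, or be decrypted
 * for devices that require unencrypted memory.
 */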
void *dma_direct_alloc(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
        struct page *page;
        void *ret;
        int err;

        if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
            !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
            dma_alloc_need_uncached(dev, attrs))
                return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);

        size = PAGE_ALIGN(size);
        if (attrs & DMA_ATTR_NO_WARN)
                gfp |= __GFP_NOWARN;

        if (dma_should_alloc_from_pool(dev, gfp, attrs)) {
                u64 phys_mask;

                gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
                                &phys_mask);
                page = dma_alloc_from_pool(dev, size, &ret, gfp,
                                dma_coherent_ok);
                if (!page)
                        return NULL;
                goto done;
        }

        /* we always manually zero the memory once we are done */
        page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
        if (!page)
                return NULL;

        if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
            !force_dma_unencrypted(dev)) {
                /* remove any dirty cache lines on the kernel alias */
                if (!PageHighMem(page))
                        arch_dma_prep_coherent(page, size);
                /* return the page pointer as the opaque cookie */
                ret = page;
                goto done;
        }

        if ((IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
             dma_alloc_need_uncached(dev, attrs)) ||
            (IS_ENABLED(CONFIG_DMA_REMAP) && PageHighMem(page))) {
                /* remove any dirty cache lines on the kernel alias */
                arch_dma_prep_coherent(page, size);

                /* create a coherent mapping */
                ret = dma_common_contiguous_remap(page, size,
                                dma_pgprot(dev, PAGE_KERNEL, attrs),
                                __builtin_return_address(0));
                if (!ret)
                        goto out_free_pages;
                if (force_dma_unencrypted(dev)) {
                        err = set_memory_decrypted((unsigned long)ret,
                                                   1 << get_order(size));
                        if (err)
                                goto out_free_pages;
                }
                memset(ret, 0, size);
                goto done;
        }

        if (PageHighMem(page)) {
                /*
                 * Depending on the cma= arguments and per-arch setup
                 * dma_alloc_contiguous could return highmem pages.
                 * Without remapping there is no way to return them here,
                 * so log an error and fail.
                 */
                dev_info(dev, "Rejecting highmem page from CMA.\n");
                goto out_free_pages;
        }

        ret = page_address(page);
        if (force_dma_unencrypted(dev)) {
                err = set_memory_decrypted((unsigned long)ret,
                                           1 << get_order(size));
                if (err)
                        goto out_free_pages;
        }

        memset(ret, 0, size);

        if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
            dma_alloc_need_uncached(dev, attrs)) {
                arch_dma_prep_coherent(page, size);
                ret = arch_dma_set_uncached(ret, size);
                if (IS_ERR(ret))
                        goto out_encrypt_pages;
        }
done:
        *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
        return ret;

out_encrypt_pages:
        if (force_dma_unencrypted(dev)) {
                err = set_memory_encrypted((unsigned long)page_address(page),
                                           1 << get_order(size));
                /* If memory cannot be re-encrypted, it must be leaked */
                if (err)
                        return NULL;
        }
out_free_pages:
        dma_free_contiguous(dev, page, size);
        return NULL;
}

void dma_direct_free(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
        unsigned int page_order = get_order(size);

        if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
            !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
            dma_alloc_need_uncached(dev, attrs)) {
                arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
                return;
        }

        /* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
        if (dma_should_free_from_pool(dev, attrs) &&
            dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
                return;

        if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
            !force_dma_unencrypted(dev)) {
                /* cpu_addr is a struct page cookie, not a kernel address */
                dma_free_contiguous(dev, cpu_addr, size);
                return;
        }

        if (force_dma_unencrypted(dev))
                set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);

        if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
                vunmap(cpu_addr);
        else if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
                arch_dma_clear_uncached(cpu_addr, size);

        dma_free_contiguous(dev, dma_direct_to_page(dev, dma_addr), size);
}

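/*
 * Allocate a zeroed buffer and return its struct page.  Highmem pages are
 * rejected because the returned page must have a usable kernel address.
 */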
struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
                dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
        struct page *page;
        void *ret;

        if (dma_should_alloc_from_pool(dev, gfp, 0)) {
                page = dma_alloc_from_pool(dev, size, &ret, gfp,
                                dma_coherent_ok);
                if (!page)
                        return NULL;
                goto done;
        }

        page = __dma_direct_alloc_pages(dev, size, gfp);
        if (!page)
                return NULL;
        if (PageHighMem(page)) {
                /*
                 * Depending on the cma= arguments and per-arch setup
                 * dma_alloc_contiguous could return highmem pages.
                 * Without remapping there is no way to return them here,
                 * so log an error and fail.
                 */
                dev_info(dev, "Rejecting highmem page from CMA.\n");
                goto out_free_pages;
        }

        ret = page_address(page);
        if (force_dma_unencrypted(dev)) {
                if (set_memory_decrypted((unsigned long)ret,
                                1 << get_order(size)))
                        goto out_free_pages;
        }
        memset(ret, 0, size);
done:
        *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
        return page;
out_free_pages:
        dma_free_contiguous(dev, page, size);
        return NULL;
}

void dma_direct_free_pages(struct device *dev, size_t size,
                struct page *page, dma_addr_t dma_addr,
                enum dma_data_direction dir)
{
        unsigned int page_order = get_order(size);
        void *vaddr = page_address(page);

        /* If vaddr is not from an atomic pool, dma_free_from_pool() fails */
        if (dma_should_free_from_pool(dev, 0) &&
            dma_free_from_pool(dev, vaddr, size))
                return;

        if (force_dma_unencrypted(dev))
                set_memory_encrypted((unsigned long)vaddr, 1 << page_order);

        dma_free_contiguous(dev, page, size);
}

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_device(struct device *dev,
                struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i) {
                phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

                if (unlikely(is_swiotlb_buffer(paddr)))
                        swiotlb_tbl_sync_single(dev, paddr, sg->length,
                                        dir, SYNC_FOR_DEVICE);

                if (!dev_is_dma_coherent(dev))
                        arch_sync_dma_for_device(paddr, sg->length, dir);
        }
}
#endif

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_cpu(struct device *dev,
                struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i) {
                phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

                if (!dev_is_dma_coherent(dev))
                        arch_sync_dma_for_cpu(paddr, sg->length, dir);

                if (unlikely(is_swiotlb_buffer(paddr)))
                        swiotlb_tbl_sync_single(dev, paddr, sg->length, dir,
                                        SYNC_FOR_CPU);

                if (dir == DMA_FROM_DEVICE)
                        arch_dma_mark_clean(paddr, sg->length);
        }

        if (!dev_is_dma_coherent(dev))
                arch_sync_dma_for_cpu_all();
}

void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
                int nents, enum dma_data_direction dir, unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i)
                dma_direct_unmap_page(dev, sg->dma_address, sg_dma_len(sg), dir,
                                attrs);
}
#endif

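/*
 * Map each scatterlist entry individually; on failure unwind the entries
 * mapped so far and return 0, as the DMA API expects for a failed sg map.
 */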
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
                enum dma_data_direction dir, unsigned long attrs)
{
        int i;
        struct scatterlist *sg;

        for_each_sg(sgl, sg, nents, i) {
                sg->dma_address = dma_direct_map_page(dev, sg_page(sg),
                                sg->offset, sg->length, dir, attrs);
                if (sg->dma_address == DMA_MAPPING_ERROR)
                        goto out_unmap;
                sg_dma_len(sg) = sg->length;
        }

        return nents;

out_unmap:
        dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
        return 0;
}

dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        dma_addr_t dma_addr = paddr;

        if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
                dev_err_once(dev,
                             "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
                             &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
                WARN_ON_ONCE(1);
                return DMA_MAPPING_ERROR;
        }

        return dma_addr;
}

int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
        struct page *page = dma_direct_to_page(dev, dma_addr);
        int ret;

        ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
        if (!ret)
                sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
        return ret;
}

bool dma_direct_can_mmap(struct device *dev)
{
        return dev_is_dma_coherent(dev) ||
                IS_ENABLED(CONFIG_DMA_NONCOHERENT_MMAP);
}

int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
        unsigned long user_count = vma_pages(vma);
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long pfn = PHYS_PFN(dma_to_phys(dev, dma_addr));
        int ret = -ENXIO;

        vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

        if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;

        if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff)
                return -ENXIO;
        return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
                        user_count << PAGE_SHIFT, vma->vm_page_prot);
}

int dma_direct_supported(struct device *dev, u64 mask)
{
        u64 min_mask = (max_pfn - 1) << PAGE_SHIFT;

        /*
         * Because 32-bit DMA masks are so common we expect every architecture
         * to be able to satisfy them - either by not supporting more physical
         * memory, or by providing a ZONE_DMA32.  If neither is the case, the
         * architecture needs to use an IOMMU instead of the direct mapping.
         */
        if (mask >= DMA_BIT_MASK(32))
                return 1;

        /*
         * This check needs to be against the actual bit mask value, so use
         * phys_to_dma_unencrypted() here so that the SME encryption mask isn't
         * part of the check.
         */
        if (IS_ENABLED(CONFIG_ZONE_DMA))
                min_mask = min_t(u64, min_mask, DMA_BIT_MASK(zone_dma_bits));
        return mask >= phys_to_dma_unencrypted(dev, min_mask);
}

size_t dma_direct_max_mapping_size(struct device *dev)
{
        /* If SWIOTLB is active, use its maximum mapping size */
        if (is_swiotlb_active() &&
            (dma_addressing_limited(dev) || swiotlb_force == SWIOTLB_FORCE))
                return swiotlb_max_mapping_size(dev);
        return SIZE_MAX;
}

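/*
 * A sync is only needed when the device is not cache-coherent or when the
 * DMA address was bounced through the swiotlb buffer.
 */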
bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr)
{
        return !dev_is_dma_coherent(dev) ||
                is_swiotlb_buffer(dma_to_phys(dev, dma_addr));
}

/**
 * dma_direct_set_offset - Assign scalar offset for a single DMA range.
 * @dev:        device pointer; needed to "own" the allocated memory.
 * @cpu_start:  beginning of memory region covered by this offset.
 * @dma_start:  beginning of DMA/PCI region covered by this offset.
 * @size:       size of the region.
 *
 * This is for the simple case of a uniform offset which cannot
 * be discovered by "dma-ranges".
 *
 * It returns -ENOMEM if out of memory, -EINVAL if a map
 * already exists, 0 otherwise.
 *
 * Note: any call to this from a driver is a bug. The mapping needs
 * to be described by the device tree or other firmware interfaces.
 */
int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
                         dma_addr_t dma_start, u64 size)
{
        struct bus_dma_region *map;
        u64 offset = (u64)cpu_start - (u64)dma_start;

        if (dev->dma_range_map) {
                dev_err(dev, "attempt to add DMA range to existing map\n");
                return -EINVAL;
        }

        if (!offset)
                return 0;

        map = kcalloc(2, sizeof(*map), GFP_KERNEL);
        if (!map)
                return -ENOMEM;
        map[0].cpu_start = cpu_start;
        map[0].dma_start = dma_start;
        map[0].offset = offset;
        map[0].size = size;
        dev->dma_range_map = map;
        return 0;
}
EXPORT_SYMBOL_GPL(dma_direct_set_offset);