// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2020 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-map-ops.h>
#include <linux/scatterlist.h>
#include <linux/pfn.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include "direct.h"

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but some use
 * it for entirely different regions. In that case the arch code needs to
 * override the variable below for dma-direct to work properly.
 */
unsigned int zone_dma_bits __ro_after_init = 24;

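/*
 * Translate a CPU physical address into a device DMA address, using the
 * unencrypted variant when the device requires DMA to unencrypted memory.
 */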
static inline dma_addr_t phys_to_dma_direct(struct device *dev,
                phys_addr_t phys)
{
        if (force_dma_unencrypted(dev))
                return phys_to_dma_unencrypted(dev, phys);
        return phys_to_dma(dev, phys);
}

static inline struct page *dma_direct_to_page(struct device *dev,
                dma_addr_t dma_addr)
{
        return pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_addr)));
}

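/*
 * Return the DMA mask needed to reach the highest page of physical memory
 * through the direct mapping.
 */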
u64 dma_direct_get_required_mask(struct device *dev)
{
        phys_addr_t phys = (phys_addr_t)(max_pfn - 1) << PAGE_SHIFT;
        u64 max_dma = phys_to_dma_direct(dev, phys);

        return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}

static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
                u64 *phys_limit)
{
        u64 dma_limit = min_not_zero(dma_mask, dev->bus_dma_limit);

        /*
         * Optimistically try the zone that the physical address mask falls
         * into first.  If that returns memory that isn't actually addressable
         * we will fall back to the next lower zone and try again.
         *
         * Note that GFP_DMA32 and GFP_DMA are no-ops without the corresponding
         * zones.
         */
        *phys_limit = dma_to_phys(dev, dma_limit);
        if (*phys_limit <= DMA_BIT_MASK(zone_dma_bits))
                return GFP_DMA;
        if (*phys_limit <= DMA_BIT_MASK(32))
                return GFP_DMA32;
        return 0;
}

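/*
 * Check that the physical range [phys, phys + size) maps to a DMA address
 * that fits within both the device's coherent DMA mask and its bus limit.
 */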
static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
        dma_addr_t dma_addr = phys_to_dma_direct(dev, phys);

        if (dma_addr == DMA_MAPPING_ERROR)
                return false;
        return dma_addr + size - 1 <=
                min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
}

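/*
 * Helpers to switch the kernel mapping of an allocation between encrypted
 * and decrypted when the device requires unencrypted DMA; both are no-ops
 * otherwise.
 */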
static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
{
        if (!force_dma_unencrypted(dev))
                return 0;
        return set_memory_decrypted((unsigned long)vaddr, 1 << get_order(size));
}

static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
{
        int ret;

        if (!force_dma_unencrypted(dev))
                return 0;
        ret = set_memory_encrypted((unsigned long)vaddr, 1 << get_order(size));
        if (ret)
                pr_warn_ratelimited("leaking DMA memory that can't be re-encrypted\n");
        return ret;
}

static void __dma_direct_free_pages(struct device *dev, struct page *page,
                size_t size)
{
        if (swiotlb_free(dev, page, size))
                return;
        dma_free_contiguous(dev, page, size);
}

static struct page *dma_direct_alloc_swiotlb(struct device *dev, size_t size)
{
        struct page *page = swiotlb_alloc(dev, size);

        if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
                swiotlb_free(dev, page, size);
                return NULL;
        }

        return page;
}

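/*
 * Allocate the backing pages for a direct mapping: use the restricted
 * swiotlb pool when the device requires it, otherwise try the contiguous
 * allocator and then the page allocator, falling back to ZONE_DMA32 and
 * ZONE_DMA when the returned memory is not addressable by the device.
 */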
static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
                gfp_t gfp)
{
        int node = dev_to_node(dev);
        struct page *page = NULL;
        u64 phys_limit;

        WARN_ON_ONCE(!PAGE_ALIGNED(size));

        if (is_swiotlb_for_alloc(dev))
                return dma_direct_alloc_swiotlb(dev, size);

        gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
                        &phys_limit);
        page = dma_alloc_contiguous(dev, size, gfp);
        if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
                dma_free_contiguous(dev, page, size);
                page = NULL;
        }
again:
        if (!page)
                page = alloc_pages_node(node, gfp, get_order(size));
        if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
                dma_free_contiguous(dev, page, size);
                page = NULL;

                if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
                    phys_limit < DMA_BIT_MASK(64) &&
                    !(gfp & (GFP_DMA32 | GFP_DMA))) {
                        gfp |= GFP_DMA32;
                        goto again;
                }

                if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
                        gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
                        goto again;
                }
        }

        return page;
}

/*
 * Check if a potentially blocking operation needs to dip into the atomic
 * pools for the given device/gfp.
 */
static bool dma_direct_use_pool(struct device *dev, gfp_t gfp)
{
        return !gfpflags_allow_blocking(gfp) && !is_swiotlb_for_alloc(dev);
}

static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp)
{
        struct page *page;
        u64 phys_mask;
        void *ret;

        if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_DMA_COHERENT_POOL)))
                return NULL;

        gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
                        &phys_mask);
        page = dma_alloc_from_pool(dev, size, &ret, gfp, dma_coherent_ok);
        if (!page)
                return NULL;
        *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
        return ret;
}

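/*
 * DMA_ATTR_NO_KERNEL_MAPPING allocation: no kernel virtual address is set
 * up; the struct page pointer is returned to the caller as an opaque cookie.
 */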
static void *dma_direct_alloc_no_mapping(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp)
{
        struct page *page;

        page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
        if (!page)
                return NULL;

        /* remove any dirty cache lines on the kernel alias */
        if (!PageHighMem(page))
                arch_dma_prep_coherent(page, size);

        /* return the page pointer as the opaque cookie */
        *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
        return page;
}

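/*
 * Allocate coherent memory for @dev.  Depending on the device and the kernel
 * configuration this returns memory from the kernel direct mapping, a
 * remapped (and possibly uncached) alias, one of the atomic or global pools,
 * or falls back to the legacy arch_dma_alloc() hook.
 */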
void *dma_direct_alloc(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
        bool remap = false, set_uncached = false;
        struct page *page;
        void *ret;

        size = PAGE_ALIGN(size);
        if (attrs & DMA_ATTR_NO_WARN)
                gfp |= __GFP_NOWARN;

        if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
            !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev))
                return dma_direct_alloc_no_mapping(dev, size, dma_handle, gfp);

        if (!dev_is_dma_coherent(dev)) {
                /*
                 * Fall back to the arch handler if it exists.  This should
                 * eventually go away.
                 */
                if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
                    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
                    !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
                    !is_swiotlb_for_alloc(dev))
                        return arch_dma_alloc(dev, size, dma_handle, gfp,
                                              attrs);

                /*
                 * If there is a global pool, always allocate from it for
                 * non-coherent devices.
                 */
                if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL))
                        return dma_alloc_from_global_coherent(dev, size,
                                        dma_handle);

                /*
                 * Otherwise remap if the architecture is asking for it.  But
                 * given that remapping memory is a blocking operation we'll
                 * instead have to dip into the atomic pools.
                 */
                remap = IS_ENABLED(CONFIG_DMA_DIRECT_REMAP);
                if (remap) {
                        if (dma_direct_use_pool(dev, gfp))
                                return dma_direct_alloc_from_pool(dev, size,
                                                dma_handle, gfp);
                } else {
                        if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED))
                                return NULL;
                        set_uncached = true;
                }
        }

        /*
         * Decrypting memory may block, so allocate the memory from the atomic
         * pools if we can't block.
         */
        if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
                return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);

        /* we always manually zero the memory once we are done */
        page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
        if (!page)
                return NULL;
        if (PageHighMem(page)) {
                /*
                 * Depending on the cma= arguments and per-arch setup,
                 * dma_alloc_contiguous could return highmem pages.
                 * Without remapping there is no way to return them here, so
                 * log an error and fail.
                 */
                if (!IS_ENABLED(CONFIG_DMA_REMAP)) {
                        dev_info(dev, "Rejecting highmem page from CMA.\n");
                        goto out_free_pages;
                }
                remap = true;
                set_uncached = false;
        }

        if (remap) {
                /* remove any dirty cache lines on the kernel alias */
                arch_dma_prep_coherent(page, size);

                /* create a coherent mapping */
                ret = dma_common_contiguous_remap(page, size,
                                dma_pgprot(dev, PAGE_KERNEL, attrs),
                                __builtin_return_address(0));
                if (!ret)
                        goto out_free_pages;
        } else {
                ret = page_address(page);
                if (dma_set_decrypted(dev, ret, size))
                        goto out_free_pages;
        }

        memset(ret, 0, size);

        if (set_uncached) {
                arch_dma_prep_coherent(page, size);
                ret = arch_dma_set_uncached(ret, size);
                if (IS_ERR(ret))
                        goto out_encrypt_pages;
        }

        *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
        return ret;

out_encrypt_pages:
        if (dma_set_encrypted(dev, page_address(page), size))
                return NULL;
out_free_pages:
        __dma_direct_free_pages(dev, page, size);
        return NULL;
}

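/*
 * Undo dma_direct_alloc(): tear down whichever mapping variant was set up
 * for the allocation and return the pages to their pool or allocator.
 */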
void dma_direct_free(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
        unsigned int page_order = get_order(size);

        if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
            !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev)) {
                /* cpu_addr is a struct page cookie, not a kernel address */
                dma_free_contiguous(dev, cpu_addr, size);
                return;
        }

        if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
            !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
            !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
            !dev_is_dma_coherent(dev) &&
            !is_swiotlb_for_alloc(dev)) {
                arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
                return;
        }

        if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
            !dev_is_dma_coherent(dev)) {
                if (!dma_release_from_global_coherent(page_order, cpu_addr))
                        WARN_ON_ONCE(1);
                return;
        }

        /* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
        if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
            dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
                return;

        if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
                vunmap(cpu_addr);
        } else {
                if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
                        arch_dma_clear_uncached(cpu_addr, size);
                if (dma_set_encrypted(dev, cpu_addr, 1 << page_order))
                        return;
        }

        __dma_direct_free_pages(dev, dma_direct_to_page(dev, dma_addr), size);
}

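/*
 * Allocate zeroed pages that always have a kernel mapping and are never
 * remapped; dips into the atomic pool when the memory must be unencrypted
 * but the caller cannot block.
 */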
struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
                dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
        struct page *page;
        void *ret;

        if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
                return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);

        page = __dma_direct_alloc_pages(dev, size, gfp);
        if (!page)
                return NULL;
        if (PageHighMem(page)) {
                /*
                 * Depending on the cma= arguments and per-arch setup
                 * dma_alloc_contiguous could return highmem pages.
                 * Without remapping there is no way to return them here,
                 * so log an error and fail.
                 */
                dev_info(dev, "Rejecting highmem page from CMA.\n");
                goto out_free_pages;
        }

        ret = page_address(page);
        if (dma_set_decrypted(dev, ret, size))
                goto out_free_pages;
        memset(ret, 0, size);
        *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
        return page;
out_free_pages:
        __dma_direct_free_pages(dev, page, size);
        return NULL;
}

void dma_direct_free_pages(struct device *dev, size_t size,
                struct page *page, dma_addr_t dma_addr,
                enum dma_data_direction dir)
{
        unsigned int page_order = get_order(size);
        void *vaddr = page_address(page);

        /* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
        if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
            dma_free_from_pool(dev, vaddr, size))
                return;

        if (dma_set_encrypted(dev, vaddr, 1 << page_order))
                return;
        __dma_direct_free_pages(dev, page, size);
}

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_device(struct device *dev,
                struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i) {
                phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

                if (unlikely(is_swiotlb_buffer(dev, paddr)))
                        swiotlb_sync_single_for_device(dev, paddr, sg->length,
                                                       dir);

                if (!dev_is_dma_coherent(dev))
                        arch_sync_dma_for_device(paddr, sg->length,
                                        dir);
        }
}
#endif

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_cpu(struct device *dev,
                struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i) {
                phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

                if (!dev_is_dma_coherent(dev))
                        arch_sync_dma_for_cpu(paddr, sg->length, dir);

                if (unlikely(is_swiotlb_buffer(dev, paddr)))
                        swiotlb_sync_single_for_cpu(dev, paddr, sg->length,
                                                    dir);

                if (dir == DMA_FROM_DEVICE)
                        arch_dma_mark_clean(paddr, sg->length);
        }

        if (!dev_is_dma_coherent(dev))
                arch_sync_dma_for_cpu_all();
}

void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
                int nents, enum dma_data_direction dir, unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i)
                dma_direct_unmap_page(dev, sg->dma_address, sg_dma_len(sg), dir,
                             attrs);
}
#endif

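/*
 * Map each entry of a scatterlist for DMA, unwinding all prior mappings if
 * any single entry fails to map.
 */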
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
                enum dma_data_direction dir, unsigned long attrs)
{
        int i;
        struct scatterlist *sg;

        for_each_sg(sgl, sg, nents, i) {
                sg->dma_address = dma_direct_map_page(dev, sg_page(sg),
                                sg->offset, sg->length, dir, attrs);
                if (sg->dma_address == DMA_MAPPING_ERROR)
                        goto out_unmap;
                sg_dma_len(sg) = sg->length;
        }

        return nents;

out_unmap:
        dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
        return -EIO;
}

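/*
 * "Map" an MMIO resource: the DMA address is the physical address itself,
 * no offset translation is applied, only a check that it is reachable with
 * the device's DMA mask.
 */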
dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        dma_addr_t dma_addr = paddr;

        if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
                dev_err_once(dev,
                             "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
                             &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
                WARN_ON_ONCE(1);
                return DMA_MAPPING_ERROR;
        }

        return dma_addr;
}

int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
        struct page *page = dma_direct_to_page(dev, dma_addr);
        int ret;

        ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
        if (!ret)
                sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
        return ret;
}

bool dma_direct_can_mmap(struct device *dev)
{
        return dev_is_dma_coherent(dev) ||
                IS_ENABLED(CONFIG_DMA_NONCOHERENT_MMAP);
}

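/*
 * Map a coherent allocation into userspace by remapping the underlying
 * physical pages into the vma, after giving the per-device and global
 * coherent regions a chance to handle the request.
 */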
int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
        unsigned long user_count = vma_pages(vma);
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long pfn = PHYS_PFN(dma_to_phys(dev, dma_addr));
        int ret = -ENXIO;

        vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

        if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;
        if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
                return ret;

        if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff)
                return -ENXIO;
        return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
                        user_count << PAGE_SHIFT, vma->vm_page_prot);
}

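/*
 * Check whether a DMA mask can be used with the direct mapping: masks of at
 * least 32 bits are always accepted, smaller masks must still cover all of
 * physical memory (capped at the ZONE_DMA limit when that zone is
 * configured), with the encryption bit left out of the comparison.
 */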
int dma_direct_supported(struct device *dev, u64 mask)
{
        u64 min_mask = (max_pfn - 1) << PAGE_SHIFT;

        /*
         * Because 32-bit DMA masks are so common we expect every architecture
         * to be able to satisfy them - either by not supporting more physical
         * memory, or by providing a ZONE_DMA32.  If neither is the case, the
         * architecture needs to use an IOMMU instead of the direct mapping.
         */
        if (mask >= DMA_BIT_MASK(32))
                return 1;

        /*
         * This check needs to be against the actual bit mask value, so use
         * phys_to_dma_unencrypted() here so that the SME encryption mask isn't
         * part of the check.
         */
        if (IS_ENABLED(CONFIG_ZONE_DMA))
                min_mask = min_t(u64, min_mask, DMA_BIT_MASK(zone_dma_bits));
        return mask >= phys_to_dma_unencrypted(dev, min_mask);
}

size_t dma_direct_max_mapping_size(struct device *dev)
{
        /* If SWIOTLB is active, use its maximum mapping size */
        if (is_swiotlb_active(dev) &&
            (dma_addressing_limited(dev) || is_swiotlb_force_bounce(dev)))
                return swiotlb_max_mapping_size(dev);
        return SIZE_MAX;
}

bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr)
{
        return !dev_is_dma_coherent(dev) ||
                is_swiotlb_buffer(dev, dma_to_phys(dev, dma_addr));
}

/**
 * dma_direct_set_offset - Assign scalar offset for a single DMA range.
 * @dev:	device pointer; needed to "own" the allocated memory.
 * @cpu_start:	beginning of memory region covered by this offset.
 * @dma_start:	beginning of DMA/PCI region covered by this offset.
 * @size:	size of the region.
 *
 * This is for the simple case of a uniform offset which cannot
 * be discovered by "dma-ranges".
 *
 * It returns -ENOMEM if out of memory, -EINVAL if a map
 * already exists, 0 otherwise.
 *
 * Note: any call to this from a driver is a bug. The mapping needs
 * to be described by the device tree or other firmware interfaces.
 */
int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
                         dma_addr_t dma_start, u64 size)
{
        struct bus_dma_region *map;
        u64 offset = (u64)cpu_start - (u64)dma_start;

        if (dev->dma_range_map) {
                dev_err(dev, "attempt to add DMA range to existing map\n");
                return -EINVAL;
        }

        if (!offset)
                return 0;

        map = kcalloc(2, sizeof(*map), GFP_KERNEL);
        if (!map)
                return -ENOMEM;
        map[0].cpu_start = cpu_start;
        map[0].dma_start = dma_start;
        map[0].offset = offset;
        map[0].size = size;
        dev->dma_range_map = map;
        return 0;
}