// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2020 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-map-ops.h>
#include <linux/scatterlist.h>
#include <linux/pfn.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include "direct.h"

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but some use
 * it for entirely different regions. In that case the arch code needs to
 * override the variable below for dma-direct to work properly.
 */
unsigned int zone_dma_bits __ro_after_init = 24;

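/*
 * Worked example (illustrative note, not from the original file): with the
 * default of 24 bits, DMA_BIT_MASK(24) == 0xffffff, so ZONE_DMA covers
 * physical addresses below 16 MiB.  An architecture whose DMA zone spans,
 * say, the first 1 GiB would instead set this early during boot:
 *
 *        zone_dma_bits = 30;
 */
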
static inline dma_addr_t phys_to_dma_direct(struct device *dev,
                phys_addr_t phys)
{
        if (force_dma_unencrypted(dev))
                return phys_to_dma_unencrypted(dev, phys);
        return phys_to_dma(dev, phys);
}

static inline struct page *dma_direct_to_page(struct device *dev,
                dma_addr_t dma_addr)
{
        return pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_addr)));
}

u64 dma_direct_get_required_mask(struct device *dev)
{
        phys_addr_t phys = (phys_addr_t)(max_pfn - 1) << PAGE_SHIFT;
        u64 max_dma = phys_to_dma_direct(dev, phys);

        return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}

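/*
 * Worked example for dma_direct_get_required_mask() (illustrative only):
 * with 4 GiB of RAM, 4 KiB pages and an identity phys-to-dma mapping,
 * max_pfn is 0x100000, so phys = 0xfffff000 and max_dma = 0xfffff000.
 * fls64(max_dma) is 32, giving (1ULL << 31) * 2 - 1 = 0xffffffff, i.e. the
 * device needs at least a 32-bit DMA mask to reach all of memory.
 */
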
static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
                u64 *phys_limit)
{
        u64 dma_limit = min_not_zero(dma_mask, dev->bus_dma_limit);

        /*
         * Optimistically try the zone that the physical address mask falls
         * into first.  If that returns memory that isn't actually addressable
         * we will fall back to the next lower zone and try again.
         *
         * Note that GFP_DMA32 and GFP_DMA are no-ops without the corresponding
         * zones.
         */
        *phys_limit = dma_to_phys(dev, dma_limit);
        if (*phys_limit <= DMA_BIT_MASK(zone_dma_bits))
                return GFP_DMA;
        if (*phys_limit <= DMA_BIT_MASK(32))
                return GFP_DMA32;
        return 0;
}

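/*
 * Zone selection example (illustrative only, identity mapping assumed): a
 * device with a 30-bit coherent mask and no bus_dma_limit gets
 * dma_limit = DMA_BIT_MASK(30).  With zone_dma_bits == 24 that is above
 * DMA_BIT_MASK(24) but below DMA_BIT_MASK(32), so the first allocation
 * attempt is made with GFP_DMA32.
 */
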
static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
        dma_addr_t dma_addr = phys_to_dma_direct(dev, phys);

        if (dma_addr == DMA_MAPPING_ERROR)
                return false;
        return dma_addr + size - 1 <=
                min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
}

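/*
 * dma_coherent_ok() example (illustrative only, identity mapping assumed):
 * with a 32-bit coherent mask, a 0x2000 byte buffer starting at physical
 * address 0xfffff000 ends at 0x100000fff, which is above 0xffffffff, so the
 * buffer is rejected and the caller has to retry from a lower zone.
 */
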
static void __dma_direct_free_pages(struct device *dev, struct page *page,
                size_t size)
{
        if (IS_ENABLED(CONFIG_DMA_RESTRICTED_POOL) &&
            swiotlb_free(dev, page, size))
                return;
        dma_free_contiguous(dev, page, size);
}

static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
                gfp_t gfp)
{
        int node = dev_to_node(dev);
        struct page *page = NULL;
        u64 phys_limit;

        WARN_ON_ONCE(!PAGE_ALIGNED(size));

        gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
                                           &phys_limit);
        if (IS_ENABLED(CONFIG_DMA_RESTRICTED_POOL) &&
            is_swiotlb_for_alloc(dev)) {
                page = swiotlb_alloc(dev, size);
                if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
                        __dma_direct_free_pages(dev, page, size);
                        return NULL;
                }
                return page;
        }

        page = dma_alloc_contiguous(dev, size, gfp);
        if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
                dma_free_contiguous(dev, page, size);
                page = NULL;
        }
again:
        if (!page)
                page = alloc_pages_node(node, gfp, get_order(size));
        if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
                dma_free_contiguous(dev, page, size);
                page = NULL;

                if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
                    phys_limit < DMA_BIT_MASK(64) &&
                    !(gfp & (GFP_DMA32 | GFP_DMA))) {
                        gfp |= GFP_DMA32;
                        goto again;
                }

                if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
                        gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
                        goto again;
                }
        }

        return page;
}

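/*
 * Retry flow summary for __dma_direct_alloc_pages() (descriptive note, not
 * from the original file): if the page that came back from CMA or the page
 * allocator fails dma_coherent_ok(), it is freed and the plain page
 * allocation is retried, first with GFP_DMA32 and then with GFP_DMA, so the
 * caller ends up with memory from the highest zone the device can address.
 */
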
static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp)
{
        struct page *page;
        u64 phys_mask;
        void *ret;

        gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
                                           &phys_mask);
        page = dma_alloc_from_pool(dev, size, &ret, gfp, dma_coherent_ok);
        if (!page)
                return NULL;
        *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
        return ret;
}

void *dma_direct_alloc(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
        struct page *page;
        void *ret;
        int err;

        size = PAGE_ALIGN(size);
        if (attrs & DMA_ATTR_NO_WARN)
                gfp |= __GFP_NOWARN;

        if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
            !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev)) {
                page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
                if (!page)
                        return NULL;
                /* remove any dirty cache lines on the kernel alias */
                if (!PageHighMem(page))
                        arch_dma_prep_coherent(page, size);
                *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
                /* return the page pointer as the opaque cookie */
                return page;
        }

        if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
            !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
            !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
            !dev_is_dma_coherent(dev) &&
            !is_swiotlb_for_alloc(dev))
                return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);

        if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
            !dev_is_dma_coherent(dev))
                return dma_alloc_from_global_coherent(dev, size, dma_handle);

        /*
         * Remapping or decrypting memory may block. If either is required and
         * we can't block, allocate the memory from the atomic pools.
         * If restricted DMA (i.e., is_swiotlb_for_alloc) is required, one must
         * set up another device coherent pool by shared-dma-pool and use
         * dma_alloc_from_dev_coherent instead.
         */
        if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
            !gfpflags_allow_blocking(gfp) &&
            (force_dma_unencrypted(dev) ||
             (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
              !dev_is_dma_coherent(dev))) &&
            !is_swiotlb_for_alloc(dev))
                return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);

        /* we always manually zero the memory once we are done */
        page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
        if (!page)
                return NULL;

        if ((IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
             !dev_is_dma_coherent(dev)) ||
            (IS_ENABLED(CONFIG_DMA_REMAP) && PageHighMem(page))) {
                /* remove any dirty cache lines on the kernel alias */
                arch_dma_prep_coherent(page, size);

                /* create a coherent mapping */
                ret = dma_common_contiguous_remap(page, size,
                                dma_pgprot(dev, PAGE_KERNEL, attrs),
                                __builtin_return_address(0));
                if (!ret)
                        goto out_free_pages;
                if (force_dma_unencrypted(dev)) {
                        err = set_memory_decrypted((unsigned long)ret,
                                                   1 << get_order(size));
                        if (err)
                                goto out_free_pages;
                }
                memset(ret, 0, size);
                goto done;
        }

        if (PageHighMem(page)) {
                /*
                 * Depending on the cma= arguments and per-arch setup
                 * dma_alloc_contiguous could return highmem pages.
                 * Without remapping there is no way to return them here,
                 * so log an error and fail.
                 */
                dev_info(dev, "Rejecting highmem page from CMA.\n");
                goto out_free_pages;
        }

        ret = page_address(page);
        if (force_dma_unencrypted(dev)) {
                err = set_memory_decrypted((unsigned long)ret,
                                           1 << get_order(size));
                if (err)
                        goto out_free_pages;
        }

        memset(ret, 0, size);

        if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
            !dev_is_dma_coherent(dev)) {
                arch_dma_prep_coherent(page, size);
                ret = arch_dma_set_uncached(ret, size);
                if (IS_ERR(ret))
                        goto out_encrypt_pages;
        }
done:
        *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
        return ret;

out_encrypt_pages:
        if (force_dma_unencrypted(dev)) {
                err = set_memory_encrypted((unsigned long)page_address(page),
                                           1 << get_order(size));
                /* If memory cannot be re-encrypted, it must be leaked */
                if (err)
                        return NULL;
        }
out_free_pages:
        __dma_direct_free_pages(dev, page, size);
        return NULL;
}

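/*
 * Typical way into dma_direct_alloc() (sketch, assuming a device that uses
 * the direct mapping, i.e. no IOMMU and no custom dma_map_ops):
 *
 *        dma_addr_t bus_addr;
 *        void *cpu = dma_alloc_coherent(dev, SZ_4K, &bus_addr, GFP_KERNEL);
 *
 * dma_alloc_coherent() is dma_alloc_attrs() with attrs == 0, which lands
 * here; the buffer is returned zeroed and bus_addr holds the device-visible
 * address computed by phys_to_dma_direct().
 */
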
void dma_direct_free(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
        unsigned int page_order = get_order(size);

        if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
            !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev)) {
                /* cpu_addr is a struct page cookie, not a kernel address */
                dma_free_contiguous(dev, cpu_addr, size);
                return;
        }

        if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
            !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
            !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
            !dev_is_dma_coherent(dev) &&
            !is_swiotlb_for_alloc(dev)) {
                arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
                return;
        }

        if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
            !dev_is_dma_coherent(dev)) {
                if (!dma_release_from_global_coherent(page_order, cpu_addr))
                        WARN_ON_ONCE(1);
                return;
        }

        /* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
        if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
            dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
                return;

        if (force_dma_unencrypted(dev))
                set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);

        if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
                vunmap(cpu_addr);
        else if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
                arch_dma_clear_uncached(cpu_addr, size);

        __dma_direct_free_pages(dev, dma_direct_to_page(dev, dma_addr), size);
}

struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
                dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
        struct page *page;
        void *ret;

        if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
            force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) &&
            !is_swiotlb_for_alloc(dev))
                return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);

        page = __dma_direct_alloc_pages(dev, size, gfp);
        if (!page)
                return NULL;
        if (PageHighMem(page)) {
                /*
                 * Depending on the cma= arguments and per-arch setup
                 * dma_alloc_contiguous could return highmem pages.
                 * Without remapping there is no way to return them here,
                 * so log an error and fail.
                 */
                dev_info(dev, "Rejecting highmem page from CMA.\n");
                goto out_free_pages;
        }

        ret = page_address(page);
        if (force_dma_unencrypted(dev)) {
                if (set_memory_decrypted((unsigned long)ret,
                                1 << get_order(size)))
                        goto out_free_pages;
        }
        memset(ret, 0, size);
        *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
        return page;
out_free_pages:
        __dma_direct_free_pages(dev, page, size);
        return NULL;
}

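/*
 * dma_direct_alloc_pages() backs the dma_alloc_pages() API for direct-mapped
 * devices.  Rough usage sketch (illustrative only):
 *
 *        struct page *p = dma_alloc_pages(dev, SZ_64K, &bus_addr,
 *                                         DMA_TO_DEVICE, GFP_KERNEL);
 *
 * The memory is always zeroed and has a kernel mapping, unlike the
 * DMA_ATTR_NO_KERNEL_MAPPING path in dma_direct_alloc() above.
 */
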
void dma_direct_free_pages(struct device *dev, size_t size,
                struct page *page, dma_addr_t dma_addr,
                enum dma_data_direction dir)
{
        unsigned int page_order = get_order(size);
        void *vaddr = page_address(page);

        /* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
        if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
            dma_free_from_pool(dev, vaddr, size))
                return;

        if (force_dma_unencrypted(dev))
                set_memory_encrypted((unsigned long)vaddr, 1 << page_order);

        __dma_direct_free_pages(dev, page, size);
}

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_device(struct device *dev,
                struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i) {
                phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

                if (unlikely(is_swiotlb_buffer(dev, paddr)))
                        swiotlb_sync_single_for_device(dev, paddr, sg->length,
                                                       dir);

                if (!dev_is_dma_coherent(dev))
                        arch_sync_dma_for_device(paddr, sg->length,
                                        dir);
        }
}
#endif

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_cpu(struct device *dev,
                struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i) {
                phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

                if (!dev_is_dma_coherent(dev))
                        arch_sync_dma_for_cpu(paddr, sg->length, dir);

                if (unlikely(is_swiotlb_buffer(dev, paddr)))
                        swiotlb_sync_single_for_cpu(dev, paddr, sg->length,
                                                    dir);

                if (dir == DMA_FROM_DEVICE)
                        arch_dma_mark_clean(paddr, sg->length);
        }

        if (!dev_is_dma_coherent(dev))
                arch_sync_dma_for_cpu_all();
}

void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
                int nents, enum dma_data_direction dir, unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i)
                dma_direct_unmap_page(dev, sg->dma_address, sg_dma_len(sg), dir,
                             attrs);
}
#endif

int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
                enum dma_data_direction dir, unsigned long attrs)
{
        int i;
        struct scatterlist *sg;

        for_each_sg(sgl, sg, nents, i) {
                sg->dma_address = dma_direct_map_page(dev, sg_page(sg),
                                sg->offset, sg->length, dir, attrs);
                if (sg->dma_address == DMA_MAPPING_ERROR)
                        goto out_unmap;
                sg_dma_len(sg) = sg->length;
        }

        return nents;

out_unmap:
        dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
        return -EIO;
}

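/*
 * Scatterlist error handling note (descriptive, not from the original file):
 * on a mapping failure dma_direct_map_sg() unmaps everything it mapped so
 * far and returns a negative errno; the dma_map_sg()/dma_map_sgtable()
 * wrappers translate that into the return convention their callers expect.
 */
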
dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        dma_addr_t dma_addr = paddr;

        if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
                dev_err_once(dev,
                             "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
                             &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
                WARN_ON_ONCE(1);
                return DMA_MAPPING_ERROR;
        }

        return dma_addr;
}

int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
        struct page *page = dma_direct_to_page(dev, dma_addr);
        int ret;

        ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
        if (!ret)
                sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
        return ret;
}

bool dma_direct_can_mmap(struct device *dev)
{
        return dev_is_dma_coherent(dev) ||
                IS_ENABLED(CONFIG_DMA_NONCOHERENT_MMAP);
}

int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
        unsigned long user_count = vma_pages(vma);
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long pfn = PHYS_PFN(dma_to_phys(dev, dma_addr));
        int ret = -ENXIO;

        vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

        if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;
        if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
                return ret;

        if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff)
                return -ENXIO;
        return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
                        user_count << PAGE_SHIFT, vma->vm_page_prot);
}

int dma_direct_supported(struct device *dev, u64 mask)
{
        u64 min_mask = (max_pfn - 1) << PAGE_SHIFT;

        /*
         * Because 32-bit DMA masks are so common we expect every architecture
         * to be able to satisfy them - either by not supporting more physical
         * memory, or by providing a ZONE_DMA32.  If neither is the case, the
         * architecture needs to use an IOMMU instead of the direct mapping.
         */
        if (mask >= DMA_BIT_MASK(32))
                return 1;

        /*
         * This check needs to be against the actual bit mask value, so use
         * phys_to_dma_unencrypted() here so that the SME encryption mask isn't
         * part of the check.
         */
        if (IS_ENABLED(CONFIG_ZONE_DMA))
                min_mask = min_t(u64, min_mask, DMA_BIT_MASK(zone_dma_bits));
        return mask >= phys_to_dma_unencrypted(dev, min_mask);
}

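/*
 * Example (illustrative only): dma_set_mask(dev, DMA_BIT_MASK(64)) always
 * passes this check because any mask of 32 bits or more is accepted above.
 * A smaller mask such as DMA_BIT_MASK(28) is only accepted if it covers the
 * top of RAM or, with CONFIG_ZONE_DMA, at least DMA_BIT_MASK(zone_dma_bits).
 */
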
size_t dma_direct_max_mapping_size(struct device *dev)
{
        /* If SWIOTLB is active, use its maximum mapping size */
        if (is_swiotlb_active(dev) &&
            (dma_addressing_limited(dev) || is_swiotlb_force_bounce(dev)))
                return swiotlb_max_mapping_size(dev);
        return SIZE_MAX;
}

bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr)
{
        return !dev_is_dma_coherent(dev) ||
                is_swiotlb_buffer(dev, dma_to_phys(dev, dma_addr));
}

/**
 * dma_direct_set_offset - Assign scalar offset for a single DMA range.
 * @dev:	device pointer; needed to "own" the allocated memory.
 * @cpu_start: beginning of memory region covered by this offset.
 * @dma_start: beginning of DMA/PCI region covered by this offset.
 * @size: size of the region.
 *
 * This is for the simple case of a uniform offset which cannot
 * be discovered by "dma-ranges".
 *
 * It returns -ENOMEM if out of memory, -EINVAL if a map
 * already exists, 0 otherwise.
 *
 * Note: any call to this from a driver is a bug. The mapping needs
 * to be described by the device tree or other firmware interfaces.
 */
int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
                         dma_addr_t dma_start, u64 size)
{
        struct bus_dma_region *map;
        u64 offset = (u64)cpu_start - (u64)dma_start;

        if (dev->dma_range_map) {
                dev_err(dev, "attempt to add DMA range to existing map\n");
                return -EINVAL;
        }

        if (!offset)
                return 0;

        map = kcalloc(2, sizeof(*map), GFP_KERNEL);
        if (!map)
                return -ENOMEM;
        map[0].cpu_start = cpu_start;
        map[0].dma_start = dma_start;
        map[0].offset = offset;
        map[0].size = size;
        dev->dma_range_map = map;
        return 0;
}
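
/*
 * Usage sketch for dma_direct_set_offset() (hypothetical values; real systems
 * should describe the window via "dma-ranges" in the device tree instead of
 * calling this from a driver):
 *
 *        int err = dma_direct_set_offset(dev, 0x80000000, 0x00000000, SZ_1G);
 *
 * CPU physical address 0x80000000 then appears to the device at bus address
 * 0 for a 1 GiB window; a single-entry dev->dma_range_map with offset
 * 0x80000000 is installed.
 */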