// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/scatterlist.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-noncoherent.h>
#include <linux/pfn.h>
#include <linux/set_memory.h>

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but
 * some use it for entirely different regions:
 */
#ifndef ARCH_ZONE_DMA_BITS
#define ARCH_ZONE_DMA_BITS 24
#endif

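/*
 * Illustrative arithmetic: with the default of 24 bits above,
 * DMA_BIT_MASK(ARCH_ZONE_DMA_BITS) == 0x00ffffff, i.e. ZONE_DMA is assumed
 * to cover the physical addresses below 16 MiB (2^24 bytes).
 */
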
/*
 * For AMD SEV all DMA must be to unencrypted addresses.
 */
static inline bool force_dma_unencrypted(void)
{
        return sev_active();
}

static bool
check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
                const char *caller)
{
        if (unlikely(dev && !dma_capable(dev, dma_addr, size))) {
                if (!dev->dma_mask) {
                        dev_err(dev,
                                "%s: call on device without dma_mask\n",
                                caller);
                        return false;
                }

                if (*dev->dma_mask >= DMA_BIT_MASK(32) || dev->bus_dma_mask) {
                        dev_err(dev,
                                "%s: overflow %pad+%zu of device mask %llx bus mask %llx\n",
                                caller, &dma_addr, size,
                                *dev->dma_mask, dev->bus_dma_mask);
                }
                return false;
        }
        return true;
}

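/*
 * Illustrative failure that check_addr() above catches: a device that only
 * set a 32-bit dma_mask being handed a buffer mapped above 4 GiB fails
 * dma_capable(), so the caller gets an error back and the "overflow" message
 * is logged rather than letting the device DMA through a truncated address.
 */
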
static inline dma_addr_t phys_to_dma_direct(struct device *dev,
                phys_addr_t phys)
{
        if (force_dma_unencrypted())
                return __phys_to_dma(dev, phys);
        return phys_to_dma(dev, phys);
}

u64 dma_direct_get_required_mask(struct device *dev)
{
        u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT);

        if (dev->bus_dma_mask && dev->bus_dma_mask < max_dma)
                max_dma = dev->bus_dma_mask;

        return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}

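/*
 * Worked example for the rounding in dma_direct_get_required_mask() above,
 * assuming a plain one-to-one phys-to-DMA translation: with 9 GiB of RAM the
 * highest physical address is just below 0x240000000, fls64() of that is 34,
 * so the returned mask is (1ULL << 33) * 2 - 1 == DMA_BIT_MASK(34).
 */
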
static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
                u64 *phys_mask)
{
        if (dev->bus_dma_mask && dev->bus_dma_mask < dma_mask)
                dma_mask = dev->bus_dma_mask;

        if (force_dma_unencrypted())
                *phys_mask = __dma_to_phys(dev, dma_mask);
        else
                *phys_mask = dma_to_phys(dev, dma_mask);

        /*
         * Optimistically try the zone that the physical address mask falls
         * into first.  If that returns memory that isn't actually addressable
         * we will fall back to the next lower zone and try again.
         *
         * Note that GFP_DMA32 and GFP_DMA are no-ops without the corresponding
         * zones.
         */
        if (*phys_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
                return GFP_DMA;
        if (*phys_mask <= DMA_BIT_MASK(32))
                return GFP_DMA32;
        return 0;
}

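/*
 * Example of the zone selection above (illustrative, assuming a one-to-one
 * DMA-to-physical translation): a device with a 30-bit coherent DMA mask gets
 * a physical mask above the 24-bit ZONE_DMA limit but within 32 bits, so the
 * allocation starts in ZONE_DMA32 and only falls back to GFP_DMA if the
 * returned pages turn out not to be addressable.
 */
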
static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
        return phys_to_dma_direct(dev, phys) + size - 1 <=
                        min_not_zero(dev->coherent_dma_mask, dev->bus_dma_mask);
}

void *dma_direct_alloc_pages(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        int page_order = get_order(size);
        struct page *page = NULL;
        u64 phys_mask;
        void *ret;

        if (attrs & DMA_ATTR_NO_WARN)
                gfp |= __GFP_NOWARN;

        /* we always manually zero the memory once we are done: */
        gfp &= ~__GFP_ZERO;
        gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
                        &phys_mask);
again:
        /* CMA can be used only in a context that permits sleeping */
        if (gfpflags_allow_blocking(gfp)) {
                page = dma_alloc_from_contiguous(dev, count, page_order,
                                                 gfp & __GFP_NOWARN);
                if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
                        dma_release_from_contiguous(dev, page, count);
                        page = NULL;
                }
        }
        if (!page)
                page = alloc_pages_node(dev_to_node(dev), gfp, page_order);

        if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
                __free_pages(page, page_order);
                page = NULL;

                if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
                    phys_mask < DMA_BIT_MASK(64) &&
                    !(gfp & (GFP_DMA32 | GFP_DMA))) {
                        gfp |= GFP_DMA32;
                        goto again;
                }

                if (IS_ENABLED(CONFIG_ZONE_DMA) &&
                    phys_mask < DMA_BIT_MASK(32) && !(gfp & GFP_DMA)) {
                        gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
                        goto again;
                }
        }

        if (!page)
                return NULL;
        ret = page_address(page);
        if (force_dma_unencrypted()) {
                set_memory_decrypted((unsigned long)ret, 1 << page_order);
                *dma_handle = __phys_to_dma(dev, page_to_phys(page));
        } else {
                *dma_handle = phys_to_dma(dev, page_to_phys(page));
        }
        memset(ret, 0, size);
        return ret;
}

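/*
 * Typical call path (illustrative): a driver calls
 * dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL); for a device using
 * dma_direct_ops this reaches dma_direct_alloc(), and for a cache-coherent
 * device it ends up in dma_direct_alloc_pages() above.
 */
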
/*
 * NOTE: this function must never look at the dma_addr argument, because we want
 * to be able to use it as a helper for iommu implementations as well.
 */
void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
                dma_addr_t dma_addr, unsigned long attrs)
{
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned int page_order = get_order(size);

        if (force_dma_unencrypted())
                set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
        if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count))
                free_pages((unsigned long)cpu_addr, page_order);
}

void *dma_direct_alloc(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
        if (!dev_is_dma_coherent(dev))
                return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
        return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
}

void dma_direct_free(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
        if (!dev_is_dma_coherent(dev))
                arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
        else
                dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
}

static void dma_direct_sync_single_for_device(struct device *dev,
                dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
        if (dev_is_dma_coherent(dev))
                return;
        arch_sync_dma_for_device(dev, dma_to_phys(dev, addr), size, dir);
}

static void dma_direct_sync_sg_for_device(struct device *dev,
                struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        if (dev_is_dma_coherent(dev))
                return;

        for_each_sg(sgl, sg, nents, i)
                arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
}

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
static void dma_direct_sync_single_for_cpu(struct device *dev,
                dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
        if (dev_is_dma_coherent(dev))
                return;
        arch_sync_dma_for_cpu(dev, dma_to_phys(dev, addr), size, dir);
        arch_sync_dma_for_cpu_all(dev);
}

static void dma_direct_sync_sg_for_cpu(struct device *dev,
                struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        if (dev_is_dma_coherent(dev))
                return;

        for_each_sg(sgl, sg, nents, i)
                arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
        arch_sync_dma_for_cpu_all(dev);
}

static void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                dma_direct_sync_single_for_cpu(dev, addr, size, dir);
}

static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
                int nents, enum dma_data_direction dir, unsigned long attrs)
{
        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                dma_direct_sync_sg_for_cpu(dev, sgl, nents, dir);
}
#endif

dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size, enum dma_data_direction dir,
                unsigned long attrs)
{
        phys_addr_t phys = page_to_phys(page) + offset;
        dma_addr_t dma_addr = phys_to_dma(dev, phys);

        if (!check_addr(dev, dma_addr, size, __func__))
                return DIRECT_MAPPING_ERROR;

        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                dma_direct_sync_single_for_device(dev, dma_addr, size, dir);
        return dma_addr;
}

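/*
 * Illustrative use of the streaming API that lands in dma_direct_map_page()
 * above: a driver maps a buffer with
 * dma_map_single(dev, buf, len, DMA_TO_DEVICE) and must check the result
 * with dma_mapping_error() before handing the address to the hardware.
 */
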
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
                enum dma_data_direction dir, unsigned long attrs)
{
        int i;
        struct scatterlist *sg;

        for_each_sg(sgl, sg, nents, i) {
                BUG_ON(!sg_page(sg));

                sg_dma_address(sg) = phys_to_dma(dev, sg_phys(sg));
                if (!check_addr(dev, sg_dma_address(sg), sg->length, __func__))
                        return 0;
                sg_dma_len(sg) = sg->length;
        }

        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                dma_direct_sync_sg_for_device(dev, sgl, nents, dir);
        return nents;
}

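/*
 * Illustrative caller pattern for the scatterlist path above:
 *
 *      int n = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *      if (n == 0)
 *              return -EIO;
 *
 * A return of 0 signals failure; on success the number of mapped entries is
 * returned (for the direct mapping this equals nents).
 */
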
/*
 * Because 32-bit DMA masks are so common we expect every architecture to be
 * able to satisfy them - either by not supporting more physical memory, or by
 * providing a ZONE_DMA32.  If neither is the case, the architecture needs to
 * use an IOMMU instead of the direct mapping.
 */
int dma_direct_supported(struct device *dev, u64 mask)
{
        u64 min_mask;

        if (IS_ENABLED(CONFIG_ZONE_DMA))
                min_mask = DMA_BIT_MASK(ARCH_ZONE_DMA_BITS);
        else
                min_mask = DMA_BIT_MASK(32);

        min_mask = min_t(u64, min_mask, (max_pfn - 1) << PAGE_SHIFT);

        return mask >= phys_to_dma(dev, min_mask);
}

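/*
 * Example of the check in dma_direct_supported() above (illustrative,
 * assuming CONFIG_ZONE_DMA, at least 16 MiB of memory, and a one-to-one
 * phys-to-DMA translation): a device requesting a 16-bit mask is rejected,
 * because even memory at the 24-bit ZONE_DMA limit might not be addressable,
 * while any mask of DMA_BIT_MASK(24) or larger is accepted.
 */
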
int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return dma_addr == DIRECT_MAPPING_ERROR;
}

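/*
 * Default dma_map_ops for devices whose DMA addresses map physical memory
 * directly (no IOMMU); architectures typically select this instance when no
 * IOMMU is in use.
 */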
const struct dma_map_ops dma_direct_ops = {
        .alloc                  = dma_direct_alloc,
        .free                   = dma_direct_free,
        .map_page               = dma_direct_map_page,
        .map_sg                 = dma_direct_map_sg,
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE)
        .sync_single_for_device = dma_direct_sync_single_for_device,
        .sync_sg_for_device     = dma_direct_sync_sg_for_device,
#endif
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
        .sync_single_for_cpu    = dma_direct_sync_single_for_cpu,
        .sync_sg_for_cpu        = dma_direct_sync_sg_for_cpu,
        .unmap_page             = dma_direct_unmap_page,
        .unmap_sg               = dma_direct_unmap_sg,
#endif
        .get_required_mask      = dma_direct_get_required_mask,
        .dma_supported          = dma_direct_supported,
        .mapping_error          = dma_direct_mapping_error,
        .cache_sync             = arch_dma_cache_sync,
};
EXPORT_SYMBOL(dma_direct_ops);