// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/scatterlist.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-noncoherent.h>
#include <linux/pfn.h>
#include <linux/set_memory.h>
#include <linux/swiotlb.h>

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but
 * some use it for entirely different regions:
 */
#ifndef ARCH_ZONE_DMA_BITS
#define ARCH_ZONE_DMA_BITS 24
#endif

/*
 * For AMD SEV all DMA must be to unencrypted addresses.
 */
static inline bool force_dma_unencrypted(void)
{
	return sev_active();
}

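/*
 * Complain about a mapping request that cannot be satisfied: either the
 * device has no dma_mask set, or the translated address does not fit the
 * device's DMA mask / bus mask.
 */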
static void report_addr(struct device *dev, dma_addr_t dma_addr, size_t size)
{
	if (!dev->dma_mask) {
		dev_err_once(dev, "DMA map on device without dma_mask\n");
	} else if (*dev->dma_mask >= DMA_BIT_MASK(32) || dev->bus_dma_mask) {
		dev_err_once(dev,
			"overflow %pad+%zu of DMA mask %llx bus mask %llx\n",
			&dma_addr, size, *dev->dma_mask, dev->bus_dma_mask);
	}
	WARN_ON_ONCE(1);
}

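/*
 * Translate a physical address to a DMA address, using the unencrypted
 * variant when SEV forces DMA to unencrypted memory.
 */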
static inline dma_addr_t phys_to_dma_direct(struct device *dev,
		phys_addr_t phys)
{
	if (force_dma_unencrypted())
		return __phys_to_dma(dev, phys);
	return phys_to_dma(dev, phys);
}

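/*
 * Return the DMA mask required to address all physical memory under the
 * direct mapping, capped by the bus DMA mask if one is set.
 */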
u64 dma_direct_get_required_mask(struct device *dev)
{
	u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT);

	if (dev->bus_dma_mask && dev->bus_dma_mask < max_dma)
		max_dma = dev->bus_dma_mask;

	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}

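/*
 * Pick the GFP zone flag that most likely yields memory addressable under
 * the given DMA mask, and report the corresponding physical address limit
 * through *phys_mask.
 */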
static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
		u64 *phys_mask)
{
	if (dev->bus_dma_mask && dev->bus_dma_mask < dma_mask)
		dma_mask = dev->bus_dma_mask;

	if (force_dma_unencrypted())
		*phys_mask = __dma_to_phys(dev, dma_mask);
	else
		*phys_mask = dma_to_phys(dev, dma_mask);

	/*
	 * Optimistically try the zone that the physical address mask falls
	 * into first.  If that returns memory that isn't actually addressable
	 * we will fall back to the next lower zone and try again.
	 *
	 * Note that GFP_DMA32 and GFP_DMA are no-ops without the corresponding
	 * zones.
	 */
	if (*phys_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
		return GFP_DMA;
	if (*phys_mask <= DMA_BIT_MASK(32))
		return GFP_DMA32;
	return 0;
}

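/*
 * Check whether a physical range is reachable under both the coherent DMA
 * mask and the bus DMA mask.
 */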
static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
	return phys_to_dma_direct(dev, phys) + size - 1 <=
			min_not_zero(dev->coherent_dma_mask, dev->bus_dma_mask);
}

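/*
 * Allocate pages for a coherent mapping: try CMA first when sleeping is
 * allowed, then the page allocator, retrying in GFP_DMA32 and then GFP_DMA
 * when the returned memory is not addressable by the device.
 */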
struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int page_order = get_order(size);
	struct page *page = NULL;
	u64 phys_mask;

	if (attrs & DMA_ATTR_NO_WARN)
		gfp |= __GFP_NOWARN;

	/* we always manually zero the memory once we are done: */
	gfp &= ~__GFP_ZERO;
	gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
			&phys_mask);
again:
	/* CMA can be used only in the context which permits sleeping */
	if (gfpflags_allow_blocking(gfp)) {
		page = dma_alloc_from_contiguous(dev, count, page_order,
						 gfp & __GFP_NOWARN);
		if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
			dma_release_from_contiguous(dev, page, count);
			page = NULL;
		}
	}
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), gfp, page_order);

	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		__free_pages(page, page_order);
		page = NULL;

		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
		    phys_mask < DMA_BIT_MASK(64) &&
		    !(gfp & (GFP_DMA32 | GFP_DMA))) {
			gfp |= GFP_DMA32;
			goto again;
		}

		if (IS_ENABLED(CONFIG_ZONE_DMA) &&
		    phys_mask < DMA_BIT_MASK(32) && !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}

	return page;
}

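/*
 * Allocate and zero a coherent buffer and return its kernel address.
 * Highmem pages cannot be used here without remapping and are rejected.
 */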
void *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct page *page;
	void *ret;

	page = __dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
	if (!page)
		return NULL;

	if (PageHighMem(page)) {
		/*
		 * Depending on the cma= arguments and per-arch setup
		 * dma_alloc_from_contiguous could return highmem pages.
		 * Without remapping there is no way to return them here,
		 * so log an error and fail.
		 */
		dev_info(dev, "Rejecting highmem page from CMA.\n");
		__dma_direct_free_pages(dev, size, page);
		return NULL;
	}

	ret = page_address(page);
	if (force_dma_unencrypted()) {
		set_memory_decrypted((unsigned long)ret, 1 << get_order(size));
		*dma_handle = __phys_to_dma(dev, page_to_phys(page));
	} else {
		*dma_handle = phys_to_dma(dev, page_to_phys(page));
	}
	memset(ret, 0, size);
	return ret;
}

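/*
 * Return pages to CMA if they were allocated from there, otherwise to the
 * page allocator.
 */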
void __dma_direct_free_pages(struct device *dev, size_t size, struct page *page)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	if (!dma_release_from_contiguous(dev, page, count))
		__free_pages(page, get_order(size));
}

void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int page_order = get_order(size);

	if (force_dma_unencrypted())
		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
	__dma_direct_free_pages(dev, size, virt_to_page(cpu_addr));
}

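/*
 * The dma_direct_{alloc,free} entry points dispatch to the architecture
 * helpers for non-coherent devices and to the plain page-based paths
 * otherwise.
 */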
void *dma_direct_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	if (!dev_is_dma_coherent(dev))
		return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
	return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
}

void dma_direct_free(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
	if (!dev_is_dma_coherent(dev))
		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
	else
		dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
}

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
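/*
 * Sync a mapping for device access: copy the data into any swiotlb bounce
 * buffer first, then let the architecture perform its cache maintenance
 * for non-coherent devices.
 */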
void dma_direct_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	if (unlikely(is_swiotlb_buffer(paddr)))
		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_device(dev, paddr, size, dir);
}

void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		if (unlikely(is_swiotlb_buffer(sg_phys(sg))))
			swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length,
					dir, SYNC_FOR_DEVICE);

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_device(dev, sg_phys(sg), sg->length,
					dir);
	}
}
#endif

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
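/*
 * Sync a mapping for CPU access: perform the architecture cache
 * maintenance for non-coherent devices first, then copy back from any
 * swiotlb bounce buffer.
 */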
void dma_direct_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	if (!dev_is_dma_coherent(dev)) {
		arch_sync_dma_for_cpu(dev, paddr, size, dir);
		arch_sync_dma_for_cpu_all(dev);
	}

	if (unlikely(is_swiotlb_buffer(paddr)))
		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
}

void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);

		if (unlikely(is_swiotlb_buffer(sg_phys(sg))))
			swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length, dir,
					SYNC_FOR_CPU);
	}

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu_all(dev);
}

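/*
 * Tear down a single mapping: sync it for the CPU unless the caller asked
 * to skip that, and release any swiotlb bounce buffer that was used.
 */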
void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t phys = dma_to_phys(dev, addr);

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);

	if (unlikely(is_swiotlb_buffer(phys)))
		swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
}

void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		dma_direct_unmap_page(dev, sg->dma_address, sg_dma_len(sg), dir,
			     attrs);
}
#else
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
}
#endif

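/*
 * Check whether the address can be used directly, i.e. swiotlb bouncing is
 * not forced and the device can reach it.
 */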
static inline bool dma_direct_possible(struct device *dev, dma_addr_t dma_addr,
		size_t size)
{
	return swiotlb_force != SWIOTLB_FORCE &&
		(!dev || dma_capable(dev, dma_addr, size));
}

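/*
 * Map a single page for DMA, bouncing through swiotlb when the direct
 * address is not usable and doing cache maintenance for non-coherent
 * devices.
 */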
dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dma_addr = phys_to_dma(dev, phys);

	if (unlikely(!dma_direct_possible(dev, dma_addr, size)) &&
	    !swiotlb_map(dev, &phys, &dma_addr, size, dir, attrs)) {
		report_addr(dev, dma_addr, size);
		return DMA_MAPPING_ERROR;
	}

	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(dev, phys, size, dir);
	return dma_addr;
}

int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = dma_direct_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			goto out_unmap;
		sg_dma_len(sg) = sg->length;
	}

	return nents;

out_unmap:
	dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	return 0;
}

/*
 * Because 32-bit DMA masks are so common we expect every architecture to be
 * able to satisfy them - either by not supporting more physical memory, or by
 * providing a ZONE_DMA32.  If neither is the case, the architecture needs to
 * use an IOMMU instead of the direct mapping.
 */
int dma_direct_supported(struct device *dev, u64 mask)
{
	u64 min_mask;

	if (IS_ENABLED(CONFIG_ZONE_DMA))
		min_mask = DMA_BIT_MASK(ARCH_ZONE_DMA_BITS);
	else
		min_mask = DMA_BIT_MASK(32);

	min_mask = min_t(u64, min_mask, (max_pfn - 1) << PAGE_SHIFT);

	return mask >= phys_to_dma(dev, min_mask);
}

const struct dma_map_ops dma_direct_ops = {
	.alloc			= dma_direct_alloc,
	.free			= dma_direct_free,
	.map_page		= dma_direct_map_page,
	.map_sg			= dma_direct_map_sg,
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
	.sync_single_for_device	= dma_direct_sync_single_for_device,
	.sync_sg_for_device	= dma_direct_sync_sg_for_device,
#endif
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
	.sync_single_for_cpu	= dma_direct_sync_single_for_cpu,
	.sync_sg_for_cpu	= dma_direct_sync_sg_for_cpu,
	.unmap_page		= dma_direct_unmap_page,
	.unmap_sg		= dma_direct_unmap_sg,
#endif
	.get_required_mask	= dma_direct_get_required_mask,
	.dma_supported		= dma_direct_supported,
	.cache_sync		= arch_dma_cache_sync,
};
EXPORT_SYMBOL(dma_direct_ops);