// SPDX-License-Identifier: GPL-2.0
/*
 * arch-independent dma-mapping routines
 *
 * Copyright (c) 2006 SUSE Linux Products GmbH
 * Copyright (c) 2006 Tejun Heo <teheo@suse.de>
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/acpi.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/*
 * Managed DMA API
 */
struct dma_devres {
	size_t		size;
	void		*vaddr;
	dma_addr_t	dma_handle;
	unsigned long	attrs;
};

static void dmam_release(struct device *dev, void *res)
{
	struct dma_devres *this = res;

	dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
			this->attrs);
}

static int dmam_match(struct device *dev, void *res, void *match_data)
{
	struct dma_devres *this = res, *match = match_data;

	if (this->vaddr == match->vaddr) {
		WARN_ON(this->size != match->size ||
			this->dma_handle != match->dma_handle);
		return 1;
	}
	return 0;
}

/**
 * dmam_free_coherent - Managed dma_free_coherent()
 * @dev: Device to free coherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_coherent().
 */
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle)
{
	struct dma_devres match_data = { size, vaddr, dma_handle };

	dma_free_coherent(dev, size, vaddr, dma_handle);
	WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
}
EXPORT_SYMBOL(dmam_free_coherent);

/**
 * dmam_alloc_attrs - Managed dma_alloc_attrs()
 * @dev: Device to allocate memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: Flags in the DMA_ATTR_* namespace.
 *
 * Managed dma_alloc_attrs().  Memory allocated using this function will be
 * automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;
	dr->attrs = attrs;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_attrs);
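
/*
 * Example (illustrative sketch, not part of this file): a hypothetical
 * driver allocating a descriptor ring with the managed API above.  The
 * names foo_priv, foo_probe, ring, ring_dma and FOO_RING_SIZE are
 * assumptions for illustration only.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo_priv *priv;
 *
 *		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
 *		if (!priv)
 *			return -ENOMEM;
 *
 *		priv->ring = dmam_alloc_attrs(&pdev->dev, FOO_RING_SIZE,
 *					      &priv->ring_dma, GFP_KERNEL, 0);
 *		if (!priv->ring)
 *			return -ENOMEM;
 *
 *		return 0;
 *	}
 *
 * No explicit free is needed: the buffer is released through devres when
 * the driver detaches.  dmam_free_coherent() is only required when the
 * buffer has to go away earlier than that.
 */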

/*
 * Create scatter-list for the already allocated DMA buffer.
 */
int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page = virt_to_page(cpu_addr);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

/*
 * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
 * that the intention is to allow exporting memory allocated via the
 * coherent DMA APIs through the dma_buf API, which only accepts a
 * scattertable.  This presents a couple of problems:
 *  1. Not all memory allocated via the coherent DMA APIs is backed by
 *     a struct page
 *  2. Passing coherent DMA memory into the streaming APIs is not allowed
 *     as we will try to flush the memory through a different alias to that
 *     actually being used (and the flushes are redundant.)
 */
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_is_direct(ops))
		return dma_direct_get_sgtable(dev, sgt, cpu_addr, dma_addr,
				size, attrs);
	if (!ops->get_sgtable)
		return -ENXIO;
	return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_get_sgtable_attrs);
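
/*
 * Example (illustrative sketch, not part of this file): a hypothetical
 * dma-buf exporter building an sg_table for a buffer it previously got
 * from dma_alloc_attrs().  dev, cpu_addr, dma_addr and size are assumed
 * to describe that allocation; the caveats in the comment above still
 * apply.
 *
 *	struct sg_table *sgt;
 *	int ret;
 *
 *	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
 *	if (!sgt)
 *		return ERR_PTR(-ENOMEM);
 *
 *	ret = dma_get_sgtable_attrs(dev, sgt, cpu_addr, dma_addr, size, 0);
 *	if (ret < 0) {
 *		kfree(sgt);
 *		return ERR_PTR(ret);
 *	}
 *	return sgt;
 */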

#ifdef CONFIG_MMU
/*
 * Return the page attributes used for mapping dma_alloc_* memory, either in
 * kernel space if remapping is needed, or to userspace through dma_mmap_*.
 */
pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
{
	if (dev_is_dma_coherent(dev) ||
	    (IS_ENABLED(CONFIG_DMA_NONCOHERENT_CACHE_SYNC) &&
	     (attrs & DMA_ATTR_NON_CONSISTENT)))
		return prot;
#ifdef CONFIG_ARCH_HAS_DMA_WRITE_COMBINE
	if (attrs & DMA_ATTR_WRITE_COMBINE)
		return pgprot_writecombine(prot);
#endif
	return pgprot_dmacoherent(prot);
}
#endif /* CONFIG_MMU */
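
/*
 * Illustrative sketch (an assumption, not code from this file): an arch or
 * IOMMU remapping path would typically feed dma_pgprot() into its kernel
 * mapping, e.g.:
 *
 *	vaddr = vmap(pages, count, VM_MAP,
 *		     dma_pgprot(dev, PAGE_KERNEL, attrs));
 *
 * dma_common_mmap() below uses it the same way for the userspace side by
 * adjusting vma->vm_page_prot before remapping.
 */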

/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
#ifdef CONFIG_MMU
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;
	int ret = -ENXIO;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off >= count || user_count > count - off)
		return -ENXIO;

	return remap_pfn_range(vma, vma->vm_start,
			page_to_pfn(virt_to_page(cpu_addr)) + vma->vm_pgoff,
			user_count << PAGE_SHIFT, vma->vm_page_prot);
#else
	return -ENXIO;
#endif /* CONFIG_MMU */
}

/**
 * dma_can_mmap - check if a given device supports dma_mmap_*
 * @dev: device to check
 *
 * Returns %true if @dev supports dma_mmap_coherent() and dma_mmap_attrs() to
 * map DMA allocations to userspace.
 */
bool dma_can_mmap(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_is_direct(ops))
		return dma_direct_can_mmap(dev);
	return ops->mmap != NULL;
}
EXPORT_SYMBOL_GPL(dma_can_mmap);
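
/*
 * Example (illustrative sketch, not part of this file): a driver can use
 * dma_can_mmap() at probe time to decide whether to advertise an mmap
 * interface for its DMA buffers at all, e.g.:
 *
 *	if (!dma_can_mmap(&pdev->dev))
 *		dev_info(&pdev->dev, "mmap of DMA buffers not supported\n");
 */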

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
 * space.  The coherent DMA buffer must not be freed by the driver until the
 * user space mapping has been released.
 */
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_is_direct(ops))
		return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size,
				attrs);
	if (!ops->mmap)
		return -ENXIO;
	return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_mmap_attrs);
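
/*
 * Example (illustrative sketch, not part of this file): a hypothetical
 * character-device driver forwarding its ->mmap() file operation to
 * dma_mmap_attrs() for a buffer obtained earlier from dma_alloc_attrs().
 * foo_priv, its fields and foo_mmap are assumptions for illustration only.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_priv *priv = file->private_data;
 *
 *		return dma_mmap_attrs(priv->dev, vma, priv->cpu_addr,
 *				      priv->dma_addr, priv->size, 0);
 *	}
 *
 * As the kerneldoc above notes, the buffer must stay allocated until the
 * userspace mapping has been torn down.
 */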

u64 dma_get_required_mask(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_is_direct(ops))
		return dma_direct_get_required_mask(dev);
	if (ops->get_required_mask)
		return ops->get_required_mask(dev);

	/*
	 * We require every DMA ops implementation to at least support a 32-bit
	 * DMA mask (and use bounce buffering if that isn't supported in
	 * hardware).  As the direct mapping code has its own routine to
	 * actually report an optimal mask we default to 32-bit here as that
	 * is the right thing for most IOMMUs, and at least not actively
	 * harmful in general.
	 */
	return DMA_BIT_MASK(32);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);
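
/*
 * Example (illustrative sketch, not part of this file): hardware that
 * supports both compact 32-bit and larger 64-bit descriptors can use the
 * required mask to pick the cheaper format when all memory is 32-bit
 * addressable.  use_64bit_descriptors is an assumed driver variable.
 *
 *	if (dma_get_required_mask(dev) > DMA_BIT_MASK(32) &&
 *	    !dma_set_mask(dev, DMA_BIT_MASK(64)))
 *		use_64bit_descriptors = true;
 *	else if (!dma_set_mask(dev, DMA_BIT_MASK(32)))
 *		use_64bit_descriptors = false;
 *	else
 *		return -EIO;
 */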

void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	WARN_ON_ONCE(!dev->coherent_dma_mask);

	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	/* let the implementation decide on the zone to allocate from: */
	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

	if (dma_is_direct(ops))
		cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
	else if (ops->alloc)
		cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	else
		return NULL;

	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}
EXPORT_SYMBOL(dma_alloc_attrs);

void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
		return;
	/*
	 * On non-coherent platforms which implement DMA-coherent buffers via
	 * non-cacheable remaps, ops->free() may call vunmap().  Thus getting
	 * this far in IRQ context is a) at risk of a BUG_ON() or trying to
	 * sleep on some machines, and b) an indication that the driver is
	 * probably misusing the coherent API anyway.
	 */
	WARN_ON(irqs_disabled());

	if (!cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	if (dma_is_direct(ops))
		dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
	else if (ops->free)
		ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
EXPORT_SYMBOL(dma_free_attrs);
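
/*
 * Example (illustrative sketch, not part of this file): unmanaged
 * allocation and release of a coherent buffer.  Most drivers use the
 * dma_alloc_coherent()/dma_free_coherent() wrappers, which are these
 * functions with attrs == 0.
 *
 *	void *cpu_addr;
 *	dma_addr_t dma_handle;
 *
 *	cpu_addr = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL, 0);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *
 *	... use the buffer, then once the device is done with it ...
 *
 *	dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
 *
 * As the WARN_ON(irqs_disabled()) above indicates, the free side must run
 * from a context that is allowed to sleep, never from IRQ context.
 */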

int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_is_direct(ops))
		return dma_direct_supported(dev, mask);
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}
EXPORT_SYMBOL(dma_supported);

#ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
void arch_dma_set_mask(struct device *dev, u64 mask);
#else
#define arch_dma_set_mask(dev, mask)	do { } while (0)
#endif

int dma_set_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	arch_dma_set_mask(dev, mask);
	*dev->dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_mask);

#ifndef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dma_supported(dev, mask))
		return -EIO;

	dev->coherent_dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);
#endif
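
/*
 * Example (illustrative sketch, not part of this file): typical probe-time
 * mask setup, preferring 64-bit addressing and falling back to 32-bit.
 * dma_set_mask_and_coherent() from <linux/dma-mapping.h> combines the two
 * setters above for the common case where both masks are identical.
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */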

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));

	if (dma_is_direct(ops))
		arch_dma_cache_sync(dev, vaddr, size, dir);
	else if (ops->cache_sync)
		ops->cache_sync(dev, vaddr, size, dir);
}
EXPORT_SYMBOL(dma_cache_sync);
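
/*
 * Example (illustrative sketch, not part of this file): dma_cache_sync()
 * pairs with memory allocated with DMA_ATTR_NON_CONSISTENT, where the
 * driver takes over coherency maintenance and syncs around device access:
 *
 *	vaddr = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL,
 *				DMA_ATTR_NON_CONSISTENT);
 *
 *	... CPU writes the buffer ...
 *
 *	dma_cache_sync(dev, vaddr, size, DMA_TO_DEVICE);
 *
 *	... now hand dma_handle to the device ...
 */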

size_t dma_max_mapping_size(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	size_t size = SIZE_MAX;

	if (dma_is_direct(ops))
		size = dma_direct_max_mapping_size(dev);
	else if (ops && ops->max_mapping_size)
		size = ops->max_mapping_size(dev);

	return size;
}
EXPORT_SYMBOL_GPL(dma_max_mapping_size);
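
/*
 * Example (illustrative sketch, not part of this file): roughly what a
 * SCSI or block driver might do to keep individual requests within what a
 * single mapping can cover, e.g. when swiotlb bounce buffering limits the
 * mapping size.  shost is an assumed SCSI host structure.
 *
 *	shost->max_sectors = min_t(unsigned int, shost->max_sectors,
 *				   dma_max_mapping_size(dev) >> SECTOR_SHIFT);
 */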

unsigned long dma_get_merge_boundary(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops || !ops->get_merge_boundary)
		return 0;	/* can't merge */

	return ops->get_merge_boundary(dev);
}
EXPORT_SYMBOL_GPL(dma_get_merge_boundary);
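
/*
 * Example (illustrative sketch, not part of this file): a block-based
 * driver can feed the merge boundary into its queue limits so the block
 * layer only merges segments the IOMMU can actually map contiguously.
 * q is an assumed request queue.
 *
 *	unsigned long boundary = dma_get_merge_boundary(dev);
 *
 *	if (boundary)
 *		blk_queue_virt_boundary(q, boundary);
 */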