// SPDX-License-Identifier: GPL-2.0
/*
 * Helpers for DMA ops implementations. These generally rely on the fact that
 * the allocated memory contains normal pages in the direct kernel mapping.
 */
#include <linux/dma-contiguous.h>
#include <linux/dma-noncoherent.h>

/*
 * Create scatter-list for the already allocated DMA buffer.
 */
int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page = virt_to_page(cpu_addr);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}
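
/*
 * Illustrative example, not from the original file: DMA ops
 * implementations whose buffers come from the direct kernel mapping
 * can point the corresponding dma_map_ops hooks straight at these
 * helpers.  The "my_dma_ops" name is a hypothetical placeholder:
 *
 *	static const struct dma_map_ops my_dma_ops = {
 *		.mmap		= dma_common_mmap,
 *		.get_sgtable	= dma_common_get_sgtable,
 *	};
 */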

/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
#ifdef CONFIG_MMU
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;
	int ret = -ENXIO;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

	/* Buffers from a per-device coherent pool are mapped from there. */
	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	/* Reject requests that would map beyond the end of the buffer. */
	if (off >= count || user_count > count - off)
		return -ENXIO;

	return remap_pfn_range(vma, vma->vm_start,
			page_to_pfn(virt_to_page(cpu_addr)) + vma->vm_pgoff,
			user_count << PAGE_SHIFT, vma->vm_page_prot);
#else
	return -ENXIO;
#endif /* CONFIG_MMU */
}
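
/*
 * A minimal, hypothetical driver-side sketch of how the helper above is
 * reached: a character device ->mmap handler forwards to
 * dma_mmap_coherent(), which dispatches through the device's DMA ops.
 * "my_drv" and its fields are illustrative assumptions:
 *
 *	static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_drv *drv = file->private_data;
 *
 *		return dma_mmap_coherent(drv->dev, vma, drv->cpu_addr,
 *					 drv->dma_addr, drv->size);
 *	}
 */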

/*
 * Allocate pages in the direct kernel mapping and create a streaming
 * mapping for them via the device's DMA ops.
 */
struct page *dma_common_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct page *page;

	/* Try the CMA area first, then fall back to the page allocator. */
	page = dma_alloc_contiguous(dev, size, gfp);
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), gfp, get_order(size));
	if (!page)
		return NULL;

	*dma_handle = ops->map_page(dev, page, 0, size, dir,
				    DMA_ATTR_SKIP_CPU_SYNC);
	if (*dma_handle == DMA_MAPPING_ERROR) {
		dma_free_contiguous(dev, page, size);
		return NULL;
	}

	memset(page_address(page), 0, size);
	return page;
}

/*
 * Unmap and free pages obtained from dma_common_alloc_pages().
 */
void dma_common_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops->unmap_page)
		ops->unmap_page(dev, dma_handle, size, dir,
				DMA_ATTR_SKIP_CPU_SYNC);
	dma_free_contiguous(dev, page, size);
}
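
/*
 * Illustrative caller-side usage (assumed, not part of this file): the
 * two helpers above can back the dma_alloc_pages()/dma_free_pages() API
 * for devices whose DMA ops do not supply their own implementation:
 *
 *	struct page *page;
 *	dma_addr_t dma;
 *
 *	page = dma_alloc_pages(dev, SZ_64K, &dma, DMA_TO_DEVICE, GFP_KERNEL);
 *	if (!page)
 *		return -ENOMEM;
 *	... use the buffer for device DMA ...
 *	dma_free_pages(dev, SZ_64K, page, dma, DMA_TO_DEVICE);
 */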