blob: f04a424f91fac76b0aaad1a87676cb4aae205328 [file] [log] [blame]
Christoph Hellwig002e6742018-01-09 16:30:23 +01001// SPDX-License-Identifier: GPL-2.0
2/*
Christoph Hellwig2e86a042017-12-22 11:29:51 +01003 * DMA operations that map physical memory directly without using an IOMMU or
4 * flushing caches.
Christoph Hellwig002e6742018-01-09 16:30:23 +01005 */
6#include <linux/export.h>
7#include <linux/mm.h>
Christoph Hellwig2e86a042017-12-22 11:29:51 +01008#include <linux/dma-direct.h>
Christoph Hellwig002e6742018-01-09 16:30:23 +01009#include <linux/scatterlist.h>
Christoph Hellwig080321d2017-12-22 11:51:44 +010010#include <linux/dma-contiguous.h>
Christoph Hellwig002e6742018-01-09 16:30:23 +010011#include <linux/pfn.h>
12
Christoph Hellwig27975962018-01-09 16:30:47 +010013#define DIRECT_MAPPING_ERROR 0
14
15static bool
16check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
17 const char *caller)
18{
19 if (unlikely(dev && !dma_capable(dev, dma_addr, size))) {
20 if (*dev->dma_mask >= DMA_BIT_MASK(32)) {
21 dev_err(dev,
22 "%s: overflow %pad+%zu of device mask %llx\n",
23 caller, &dma_addr, size, *dev->dma_mask);
24 }
25 return false;
26 }
27 return true;
28}
29
Christoph Hellwig002e6742018-01-09 16:30:23 +010030static void *dma_direct_alloc(struct device *dev, size_t size,
31 dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
32{
Christoph Hellwig080321d2017-12-22 11:51:44 +010033 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
34 int page_order = get_order(size);
35 struct page *page = NULL;
Christoph Hellwig002e6742018-01-09 16:30:23 +010036
Christoph Hellwig080321d2017-12-22 11:51:44 +010037 /* CMA can be used only in the context which permits sleeping */
38 if (gfpflags_allow_blocking(gfp))
39 page = dma_alloc_from_contiguous(dev, count, page_order, gfp);
40 if (!page)
Christoph Hellwig21f237e2017-12-22 11:55:23 +010041 page = alloc_pages_node(dev_to_node(dev), gfp, page_order);
Christoph Hellwig080321d2017-12-22 11:51:44 +010042 if (!page)
43 return NULL;
Christoph Hellwig002e6742018-01-09 16:30:23 +010044
Christoph Hellwig080321d2017-12-22 11:51:44 +010045 *dma_handle = phys_to_dma(dev, page_to_phys(page));
46 memset(page_address(page), 0, size);
47 return page_address(page);
Christoph Hellwig002e6742018-01-09 16:30:23 +010048}
49
50static void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
51 dma_addr_t dma_addr, unsigned long attrs)
52{
Christoph Hellwig080321d2017-12-22 11:51:44 +010053 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
54
55 if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count))
56 free_pages((unsigned long)cpu_addr, get_order(size));
Christoph Hellwig002e6742018-01-09 16:30:23 +010057}
58
59static dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
60 unsigned long offset, size_t size, enum dma_data_direction dir,
61 unsigned long attrs)
62{
Christoph Hellwig27975962018-01-09 16:30:47 +010063 dma_addr_t dma_addr = phys_to_dma(dev, page_to_phys(page)) + offset;
64
65 if (!check_addr(dev, dma_addr, size, __func__))
66 return DIRECT_MAPPING_ERROR;
67 return dma_addr;
Christoph Hellwig002e6742018-01-09 16:30:23 +010068}
69
70static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
71 int nents, enum dma_data_direction dir, unsigned long attrs)
72{
73 int i;
74 struct scatterlist *sg;
75
76 for_each_sg(sgl, sg, nents, i) {
Christoph Hellwig002e6742018-01-09 16:30:23 +010077 BUG_ON(!sg_page(sg));
Christoph Hellwig2e86a042017-12-22 11:29:51 +010078
79 sg_dma_address(sg) = phys_to_dma(dev, sg_phys(sg));
Christoph Hellwig27975962018-01-09 16:30:47 +010080 if (!check_addr(dev, sg_dma_address(sg), sg->length, __func__))
81 return 0;
Christoph Hellwig002e6742018-01-09 16:30:23 +010082 sg_dma_len(sg) = sg->length;
83 }
84
85 return nents;
86}
87
Christoph Hellwig27975962018-01-09 16:30:47 +010088static int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
89{
90 return dma_addr == DIRECT_MAPPING_ERROR;
91}
92
/*
 * DMA operations for direct mapping: bus addresses are derived from
 * physical addresses via phys_to_dma() with no IOMMU involvement.  No
 * unmap/sync callbacks are provided here — the mapping paths above keep
 * no per-mapping state to tear down.
 */
const struct dma_map_ops dma_direct_ops = {
	.alloc			= dma_direct_alloc,
	.free			= dma_direct_free,
	.map_page		= dma_direct_map_page,
	.map_sg			= dma_direct_map_sg,
	.mapping_error		= dma_direct_mapping_error,
};
EXPORT_SYMBOL(dma_direct_ops);