/*
 * Dynamic DMA mapping support.
 *
 * On cris there is no hardware dynamic DMA address translation,
 * so consistent alloc/free are merely page allocation/freeing.
 * The rest of the dynamic DMA mapping interface is implemented
 * in asm/pci.h.
 *
 * Borrowed from i386.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <asm/io.h>

static void *v32_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	void *ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		gfp |= GFP_DMA;

	ret = (void *)__get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_phys(ret);
	}
	return ret;
}
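
/*
 * Minimal usage sketch (illustrative, not part of this file): a driver
 * asking for a coherent buffer goes through the generic DMA API, which
 * dispatches to v32_dma_alloc() above, so the returned handle is simply
 * the buffer's physical address.  The device pointer "mydev" and the
 * buffer size are hypothetical.
 *
 *	void *buf;
 *	dma_addr_t handle;
 *
 *	buf = dma_alloc_coherent(mydev, PAGE_SIZE, &handle, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	... hand "handle" to the device, access "buf" from the CPU ...
 *	dma_free_coherent(mydev, PAGE_SIZE, buf, handle);
 */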

static void v32_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	free_pages((unsigned long)vaddr, get_order(size));
}

static inline dma_addr_t v32_dma_map_page(struct device *dev,
		struct page *page, unsigned long offset, size_t size,
		enum dma_data_direction direction, unsigned long attrs)
{
	/* No IOMMU: the DMA address is simply the page's physical address. */
	return page_to_phys(page) + offset;
}

static inline int v32_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction direction,
		unsigned long attrs)
{
	/* No address translation to set up; just report back nents. */
	printk("Map sg\n");
	return nents;
}

static inline int v32_dma_supported(struct device *dev, u64 mask)
{
	/*
	 * We fall back to GFP_DMA when the mask isn't all 1s, so we
	 * can't guarantee allocations that must be within a tighter
	 * range than GFP_DMA.
	 */
	if (mask < 0x00ffffff)
		return 0;
	return 1;
}

const struct dma_map_ops v32_dma_ops = {
	.alloc		= v32_dma_alloc,
	.free		= v32_dma_free,
	.map_page	= v32_dma_map_page,
	.map_sg		= v32_dma_map_sg,
	.dma_supported	= v32_dma_supported,
};
EXPORT_SYMBOL(v32_dma_ops);
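
/*
 * The ops table above is what the generic DMA API dispatches to on this
 * architecture (asm/dma-mapping.h is expected to return &v32_dma_ops from
 * get_arch_dma_ops()).  A minimal streaming-mapping sketch with a
 * hypothetical device "dev" and page "page"; since there is no IOMMU,
 * the returned address is page_to_phys(page):
 *
 *	dma_addr_t addr;
 *
 *	addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, addr))
 *		return -EIO;
 *	... start the device transfer using "addr" ...
 *	dma_unmap_page(dev, addr, PAGE_SIZE, DMA_TO_DEVICE);
 */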