/*
 * Dynamic DMA mapping support.
 *
 * On cris there is no hardware dynamic DMA address translation,
 * so consistent alloc/free are merely page allocation/freeing.
 * The rest of the dynamic DMA mapping interface is implemented
 * in asm/pci.h.
 *
 * Borrowed from i386.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/gfp.h>
#include <asm/io.h>

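/*
 * Allocate a coherent buffer. With no IOMMU on this machine, a coherent
 * allocation is just zeroed, physically contiguous pages, and the DMA
 * handle is the buffer's physical address.
 */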
static void *v32_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	void *ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		gfp |= GFP_DMA;

	ret = (void *)__get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_phys(ret);
	}
	return ret;
}

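/*
 * Free a buffer from v32_dma_alloc(). The virtual address is enough to
 * recover the pages, so the handle and attrs go unused.
 */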
static void v32_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	free_pages((unsigned long)vaddr, get_order(size));
}

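/*
 * Map a single page for streaming DMA. With no address translation to
 * set up, the DMA address is simply the page's physical address plus
 * the offset.
 */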
static inline dma_addr_t v32_dma_map_page(struct device *dev,
		struct page *page, unsigned long offset, size_t size,
		enum dma_data_direction direction, unsigned long attrs)
{
	return page_to_phys(page) + offset;
}

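/*
 * Map a scatter-gather list. Each segment follows the same rule as
 * v32_dma_map_page(): its DMA address is its physical address.
 */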
static inline int v32_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction direction,
		unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	/* No IOMMU: hand each segment its physical address. */
	for_each_sg(sg, s, nents, i)
		s->dma_address = sg_phys(s);

	return nents;
}

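/*
 * Report which DMA masks this machine can satisfy. Anything narrower
 * than the 24-bit GFP_DMA zone cannot be honoured.
 */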
static inline int v32_dma_supported(struct device *dev, u64 mask)
{
	/*
	 * We fall back to GFP_DMA when the mask isn't all 1s, so we
	 * can't guarantee allocations that must be within a tighter
	 * range than GFP_DMA.
	 */
	if (mask < 0x00ffffff)
		return 0;
	return 1;
}

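/*
 * Operations table plugged into the generic DMA mapping API;
 * EXPORT_SYMBOL makes it reachable from modular code as well.
 */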
const struct dma_map_ops v32_dma_ops = {
	.alloc = v32_dma_alloc,
	.free = v32_dma_free,
	.map_page = v32_dma_map_page,
	.map_sg = v32_dma_map_sg,
	.dma_supported = v32_dma_supported,
};
EXPORT_SYMBOL(v32_dma_ops);
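
/*
 * Usage sketch (illustrative only, not part of this file): a driver never
 * calls these ops directly; it goes through the generic DMA API, which
 * dispatches here. "dev" is assumed to be the driver's struct device.
 *
 *	void *buf;
 *	dma_addr_t handle;
 *
 *	buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...				(program "handle" into the device)
 *	dma_free_coherent(dev, PAGE_SIZE, buf, handle);
 */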