/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

#undef DEBUG

#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/export.h>

#include <asm/pgalloc.h>

#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)

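/*
 * With a page-table MMU (and not on ColdFire), a coherent buffer is built
 * by allocating physically contiguous pages and remapping them into the
 * kernel with caching disabled via vmap().
 */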
static void *m68k_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		gfp_t flag, unsigned long attrs)
{
	struct page *page, **map;
	pgprot_t pgprot;
	void *addr;
	int i, order;

	pr_debug("dma_alloc_coherent: %zu,%x\n", size, flag);

	size = PAGE_ALIGN(size);
	order = get_order(size);

	page = alloc_pages(flag, order);
	if (!page)
		return NULL;

	*handle = page_to_phys(page);
	map = kmalloc(sizeof(struct page *) << order, flag & ~__GFP_DMA);
	if (!map) {
		__free_pages(page, order);
		return NULL;
	}
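	/*
	 * Break the high-order allocation into individual pages so that
	 * the pages beyond the page-aligned request can be given back.
	 */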
	split_page(page, order);

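	/*
	 * From here on, "order" holds the number of pages allocated and
	 * "size" the number of pages actually requested; the unused tail
	 * pages are freed back to the page allocator.
	 */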
	order = 1 << order;
	size >>= PAGE_SHIFT;
	map[0] = page;
	for (i = 1; i < size; i++)
		map[i] = page + i;
	for (; i < order; i++)
		__free_page(page + i);
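	/*
	 * Map the pages into kernel virtual space with caching disabled:
	 * the '040/'060 use the global bit plus the serialized noncachable
	 * mode, the earlier CPUs the '030-style cache-inhibit bit.
	 */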
	pgprot = __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
	if (CPU_IS_040_OR_060)
		pgprot_val(pgprot) |= _PAGE_GLOBAL040 | _PAGE_NOCACHE_S;
	else
		pgprot_val(pgprot) |= _PAGE_NOCACHE030;
	addr = vmap(map, size, VM_MAP, pgprot);
	kfree(map);

	return addr;
}

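/*
 * vfree() both removes the vmap area created above and releases the
 * underlying pages, so no separate unmapping step is needed here.
 */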
static void m68k_dma_free(struct device *dev, size_t size, void *addr,
		dma_addr_t handle, unsigned long attrs)
{
	pr_debug("dma_free_coherent: %p, %x\n", addr, handle);
	vfree(addr);
}

#else

#include <asm/cacheflush.h>

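/*
 * Without a page-table MMU, or on ColdFire, the allocation is not
 * remapped noncached; "coherent" memory is plain zeroed pages out of
 * the linear mapping, and coherency relies on the explicit cache
 * operations in the streaming hooks below.
 */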
static void *m68k_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	void *ret;

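	/* Devices that cannot address a full 32 bits get low DMA-zone memory. */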
	if (dev == NULL || (*dev->dma_mask < 0xffffffff))
		gfp |= GFP_DMA;
	ret = (void *)__get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_phys(ret);
	}
	return ret;
}

static void m68k_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	free_pages((unsigned long)vaddr, get_order(size));
}

#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */

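/*
 * Make a buffer visible to the device: write back (push) dirty cache
 * lines before DMA_TO_DEVICE transfers, and invalidate (clear) cached
 * lines before DMA_FROM_DEVICE transfers so the CPU will not read
 * stale data once the device has written the buffer.
 */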
static void m68k_dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
	case DMA_TO_DEVICE:
		cache_push(handle, size);
		break;
	case DMA_FROM_DEVICE:
		cache_clear(handle, size);
		break;
	default:
		pr_err_ratelimited("dma_sync_single_for_device: unsupported dir %u\n",
				   dir);
		break;
	}
}

static void m68k_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sglist, int nents, enum dma_data_direction dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nents, i) {
		dma_sync_single_for_device(dev, sg->dma_address, sg->length,
					   dir);
	}
}

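/*
 * There is no IOMMU on m68k, so the bus address of a page is simply its
 * physical address; mapping only has to make the buffer visible to the
 * device, unless the caller asked to skip that with
 * DMA_ATTR_SKIP_CPU_SYNC.
 */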
static dma_addr_t m68k_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	dma_addr_t handle = page_to_phys(page) + offset;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_sync_single_for_device(dev, handle, size, dir);

	return handle;
}

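/* Same as above, applied to each element of a scatterlist. */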
static int m68k_dma_map_sg(struct device *dev, struct scatterlist *sglist,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nents, i) {
		sg->dma_address = sg_phys(sg);

		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
			continue;

		dma_sync_single_for_device(dev, sg->dma_address, sg->length,
					   dir);
	}
	return nents;
}

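/*
 * The unmap and "for_cpu" sync hooks are intentionally absent: unmapping
 * needs no work without an IOMMU, and the assumption here is that these
 * CPUs do not speculatively refill data cache lines, so invalidating
 * before a DMA_FROM_DEVICE transfer is sufficient.
 */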
const struct dma_map_ops m68k_dma_ops = {
	.alloc = m68k_dma_alloc,
	.free = m68k_dma_free,
	.map_page = m68k_dma_map_page,
	.map_sg = m68k_dma_map_sg,
	.sync_single_for_device = m68k_dma_sync_single_for_device,
	.sync_sg_for_device = m68k_dma_sync_sg_for_device,
};
EXPORT_SYMBOL(m68k_dma_ops);
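
/*
 * Driver-side usage (a sketch only; the device pointer and size are
 * illustrative):
 *
 *	void *buf;
 *	dma_addr_t dma;
 *
 *	buf = dma_alloc_coherent(dev, PAGE_SIZE, &dma, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, PAGE_SIZE, buf, dma);
 *
 * The generic dma_alloc_coherent()/dma_map_*() wrappers dispatch to the
 * hooks above through this exported ops structure.
 */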