/*
 * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
 * Copyright (C) 2009 Wind River Systems Inc
 *   Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
 *
 * Based on DMA code from MIPS.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/cache.h>
#include <asm/cacheflush.h>

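/*
 * The Nios II data cache is not coherent with DMA, so streaming
 * mappings must write back and/or invalidate cache lines by hand.
 * The two helpers below implement the ownership handoffs: to the
 * device before a transfer starts, and back to the CPU once it ends.
 */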
static inline void __dma_sync_for_device(void *vaddr, size_t size,
                enum dma_data_direction direction)
{
        switch (direction) {
        case DMA_FROM_DEVICE:
                invalidate_dcache_range((unsigned long)vaddr,
                        (unsigned long)(vaddr + size));
                break;
        case DMA_TO_DEVICE:
                /*
                 * We just need to write back the caches here, but the
                 * Nios II flush instruction does both writeback and
                 * invalidate, so fall through.
                 */
        case DMA_BIDIRECTIONAL: /* flush and invalidate */
                flush_dcache_range((unsigned long)vaddr,
                        (unsigned long)(vaddr + size));
                break;
        default:
                BUG();
        }
}

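/*
 * Give the buffer back to the CPU after the device is done with it.
 * Lines that may hold device-written data are invalidated; a
 * DMA_TO_DEVICE buffer needs no work, since the CPU's copy is current.
 */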
static inline void __dma_sync_for_cpu(void *vaddr, size_t size,
                enum dma_data_direction direction)
{
        switch (direction) {
        case DMA_BIDIRECTIONAL:
        case DMA_FROM_DEVICE:
                invalidate_dcache_range((unsigned long)vaddr,
                        (unsigned long)(vaddr + size));
                break;
        case DMA_TO_DEVICE:
                break;
        default:
                BUG();
        }
}

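/*
 * Allocate a coherent buffer: grab zeroed pages, flush them out of the
 * data cache, and hand back an uncached alias (UNCAC_ADDR) so that CPU
 * accesses bypass the cache entirely.  *dma_handle is the physical
 * address for the device.
 */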
static void *nios2_dma_alloc(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
        void *ret;

        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

        /* optimized page clearing */
        gfp |= __GFP_ZERO;

        if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
                gfp |= GFP_DMA;

        ret = (void *) __get_free_pages(gfp, get_order(size));
        if (ret != NULL) {
                *dma_handle = virt_to_phys(ret);
                flush_dcache_range((unsigned long) ret,
                        (unsigned long) ret + size);
                ret = UNCAC_ADDR(ret);
        }

        return ret;
}

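/*
 * Undo nios2_dma_alloc(): convert the uncached alias back to the
 * cached kernel address with CAC_ADDR before freeing the pages.
 */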
static void nios2_dma_free(struct device *dev, size_t size, void *vaddr,
                dma_addr_t dma_handle, struct dma_attrs *attrs)
{
        unsigned long addr = (unsigned long) CAC_ADDR((unsigned long) vaddr);

        free_pages(addr, get_order(size));
}

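/*
 * Map a scatterlist for streaming DMA: sync each segment for the
 * device and record its bus address.  There is no IOMMU, so the DMA
 * address is simply the segment's physical address.
 */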
static int nios2_dma_map_sg(struct device *dev, struct scatterlist *sg,
                int nents, enum dma_data_direction direction,
                struct dma_attrs *attrs)
{
        int i;

        for_each_sg(sg, sg, nents, i) {
                void *addr;

                addr = sg_virt(sg);
                if (addr) {
                        __dma_sync_for_device(addr, sg->length, direction);
                        sg->dma_address = sg_phys(sg);
                }
        }

        return nents;
}

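/*
 * Map one page region for streaming DMA: write back/invalidate it for
 * the device, then return its physical address as the DMA handle.
 */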
static dma_addr_t nios2_dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size,
                enum dma_data_direction direction,
                struct dma_attrs *attrs)
{
        void *addr = page_address(page) + offset;

        __dma_sync_for_device(addr, size, direction);
        return page_to_phys(page) + offset;
}

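/*
 * Tear down a single-page mapping: hand the buffer back to the CPU,
 * invalidating any lines the device may have written.
 */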
static void nios2_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
                size_t size, enum dma_data_direction direction,
                struct dma_attrs *attrs)
{
        __dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
}

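/*
 * Tear down a scatterlist mapping.  For DMA_TO_DEVICE there is nothing
 * to do, since the device never wrote to memory; otherwise invalidate
 * each segment so the CPU sees the device's data.
 */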
static void nios2_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
                int nhwentries, enum dma_data_direction direction,
                struct dma_attrs *attrs)
{
        void *addr;
        int i;

        if (direction == DMA_TO_DEVICE)
                return;

        for_each_sg(sg, sg, nhwentries, i) {
                addr = sg_virt(sg);
                if (addr)
                        __dma_sync_for_cpu(addr, sg->length, direction);
        }
}

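/*
 * The sync_* callbacks let a driver reuse one streaming mapping for
 * several transfers, re-asserting ownership between them.
 */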
static void nios2_dma_sync_single_for_cpu(struct device *dev,
                dma_addr_t dma_handle, size_t size,
                enum dma_data_direction direction)
{
        __dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction);
}

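/* Hand the buffer back to the device for another transfer. */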
static void nios2_dma_sync_single_for_device(struct device *dev,
                dma_addr_t dma_handle, size_t size,
                enum dma_data_direction direction)
{
        __dma_sync_for_device(phys_to_virt(dma_handle), size, direction);
}

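/* Scatterlist variant of nios2_dma_sync_single_for_cpu(). */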
static void nios2_dma_sync_sg_for_cpu(struct device *dev,
                struct scatterlist *sg, int nelems,
                enum dma_data_direction direction)
{
        int i;

        /* Make sure that gcc doesn't leave the empty loop body. */
        for_each_sg(sg, sg, nelems, i)
                __dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
}

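/* Scatterlist variant of nios2_dma_sync_single_for_device(). */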
static void nios2_dma_sync_sg_for_device(struct device *dev,
                struct scatterlist *sg, int nelems,
                enum dma_data_direction direction)
{
        int i;

        /* Make sure that gcc doesn't leave the empty loop body. */
        for_each_sg(sg, sg, nelems, i)
                __dma_sync_for_device(sg_virt(sg), sg->length, direction);
}

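/*
 * The operations table through which the generic DMA API
 * (dma_map_single(), dma_map_sg(), dma_alloc_coherent(), ...) reaches
 * this code; exported so that the architecture's get_dma_ops() can
 * hand it out to modular drivers.
 */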
struct dma_map_ops nios2_dma_ops = {
        .alloc = nios2_dma_alloc,
        .free = nios2_dma_free,
        .map_page = nios2_dma_map_page,
        .unmap_page = nios2_dma_unmap_page,
        .map_sg = nios2_dma_map_sg,
        .unmap_sg = nios2_dma_unmap_sg,
        .sync_single_for_device = nios2_dma_sync_single_for_device,
        .sync_single_for_cpu = nios2_dma_sync_single_for_cpu,
        .sync_sg_for_cpu = nios2_dma_sync_sg_for_cpu,
        .sync_sg_for_device = nios2_dma_sync_sg_for_device,
};
EXPORT_SYMBOL(nios2_dma_ops);
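
/*
 * Illustrative (hypothetical) driver usage -- not part of this file.
 * A driver never calls nios2_dma_ops directly; it goes through the
 * generic DMA API, which dispatches here.  The buffer and 'dev' below
 * are made up for the example:
 *
 *	char *buf = kmalloc(SZ_4K, GFP_KERNEL);
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, SZ_4K, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		goto err;
 *	(program the device to write to 'handle', wait for completion)
 *	dma_unmap_single(dev, handle, SZ_4K, DMA_FROM_DEVICE);
 *	(the CPU may now safely read buf)
 */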