/*
 * DMA coherent memory allocation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Copyright (C) 2002 - 2005 Tensilica Inc.
 * Copyright (C) 2015 Cadence Design Systems Inc.
 *
 * Based on version for i386.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/cacheflush.h>

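/*
 * Explicit cache maintenance for a DMA buffer: write back dirty lines
 * before the device reads (DMA_TO_DEVICE), invalidate before the CPU
 * reads data the device wrote (DMA_FROM_DEVICE), and do both for
 * DMA_BIDIRECTIONAL.
 */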
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		    enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
		__flush_invalidate_dcache_range((unsigned long)vaddr, size);
		break;

	case DMA_FROM_DEVICE:
		__invalidate_dcache_range((unsigned long)vaddr, size);
		break;

	case DMA_TO_DEVICE:
		__flush_dcache_range((unsigned long)vaddr, size);
		break;

	case DMA_NONE:
		BUG();
		break;
	}
}
EXPORT_SYMBOL(dma_cache_sync);

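/*
 * The CPU takes back ownership of a buffer the device may have written
 * to (DMA_FROM_DEVICE or DMA_BIDIRECTIONAL): invalidate the cache lines
 * covering it so the CPU cannot see stale data. For DMA_TO_DEVICE
 * nothing needs to be done.
 */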
static void xtensa_sync_single_for_cpu(struct device *dev,
				       dma_addr_t dma_handle, size_t size,
				       enum dma_data_direction dir)
{
	void *vaddr;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
	case DMA_FROM_DEVICE:
		vaddr = bus_to_virt(dma_handle);
		__invalidate_dcache_range((unsigned long)vaddr, size);
		break;

	case DMA_NONE:
		BUG();
		break;

	default:
		break;
	}
}

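/*
 * Ownership passes to the device (DMA_TO_DEVICE or DMA_BIDIRECTIONAL):
 * write back any dirty cache lines so the device sees the data the CPU
 * wrote. For DMA_FROM_DEVICE nothing needs to be done here.
 */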
static void xtensa_sync_single_for_device(struct device *dev,
					  dma_addr_t dma_handle, size_t size,
					  enum dma_data_direction dir)
{
	void *vaddr;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
	case DMA_TO_DEVICE:
		vaddr = bus_to_virt(dma_handle);
		__flush_dcache_range((unsigned long)vaddr, size);
		break;

	case DMA_NONE:
		BUG();
		break;

	default:
		break;
	}
}

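/*
 * Scatter-gather variants: apply the single-buffer sync to each
 * scatterlist entry in turn.
 */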
static void xtensa_sync_sg_for_cpu(struct device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		xtensa_sync_single_for_cpu(dev, sg_dma_address(s),
					   sg_dma_len(s), dir);
	}
}

static void xtensa_sync_sg_for_device(struct device *dev,
				      struct scatterlist *sg, int nents,
				      enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		xtensa_sync_single_for_device(dev, sg_dma_address(s),
					      sg_dma_len(s), dir);
	}
}

/*
 * Note: We assume that the full memory space is always mapped to 'kseg'.
 * Otherwise we have to use page attributes (not implemented).
 */

static void *xtensa_dma_alloc(struct device *dev, size_t size,
			      dma_addr_t *handle, gfp_t flag,
			      struct dma_attrs *attrs)
{
	unsigned long ret;
	unsigned long uncached = 0;

	/* ignore region specifiers */

	flag &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		flag |= GFP_DMA;
	ret = (unsigned long)__get_free_pages(flag, get_order(size));

	if (ret == 0)
		return NULL;

	/* We currently don't support coherent memory outside KSEG */

	BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR ||
	       ret > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);

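	/*
	 * KSEG maps the same physical pages twice: once cached and once
	 * with caches bypassed. Hand out the bypass (uncached) alias so
	 * accesses stay coherent with DMA, and invalidate the cached
	 * alias so no dirty line can later be written back over the
	 * buffer.
	 */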
	uncached = ret + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
	*handle = virt_to_bus((void *)ret);
	__invalidate_dcache_range(ret, size);

	return (void *)uncached;
}

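/*
 * Undo xtensa_dma_alloc: convert the uncached (bypass) alias back to
 * the cached KSEG address that __get_free_pages returned, then free
 * the pages.
 */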
static void xtensa_dma_free(struct device *hwdev, size_t size, void *vaddr,
			    dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	unsigned long addr = (unsigned long)vaddr +
		XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;

	BUG_ON(addr < XCHAL_KSEG_CACHED_VADDR ||
	       addr > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);

	free_pages(addr, get_order(size));
}

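/*
 * Streaming mappings: there is no IOMMU, so mapping a page is just a
 * physical-address calculation plus the cache maintenance needed to
 * hand the buffer over to the device (and back again on unmap).
 */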
static dma_addr_t xtensa_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t size,
				  enum dma_data_direction dir,
				  struct dma_attrs *attrs)
{
	dma_addr_t dma_handle = page_to_phys(page) + offset;

	BUG_ON(PageHighMem(page));
	xtensa_sync_single_for_device(dev, dma_handle, size, dir);
	return dma_handle;
}

static void xtensa_unmap_page(struct device *dev, dma_addr_t dma_handle,
			      size_t size, enum dma_data_direction dir,
			      struct dma_attrs *attrs)
{
	xtensa_sync_single_for_cpu(dev, dma_handle, size, dir);
}

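/*
 * Map each scatterlist entry on its own; without an IOMMU entries are
 * never merged, so the returned count always equals nents.
 */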
static int xtensa_map_sg(struct device *dev, struct scatterlist *sg,
			 int nents, enum dma_data_direction dir,
			 struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		s->dma_address = xtensa_map_page(dev, sg_page(s), s->offset,
						 s->length, dir, attrs);
	}
	return nents;
}

static void xtensa_unmap_sg(struct device *dev,
			    struct scatterlist *sg, int nents,
			    enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		xtensa_unmap_page(dev, sg_dma_address(s),
				  sg_dma_len(s), dir, attrs);
	}
}

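/*
 * Mappings are plain physical addresses and can never fail, so there
 * is no error cookie to report.
 */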
int xtensa_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}

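/*
 * The dma_map_ops instance used for xtensa; the generic DMA API
 * dispatches to these callbacks.
 */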
struct dma_map_ops xtensa_dma_map_ops = {
	.alloc = xtensa_dma_alloc,
	.free = xtensa_dma_free,
	.map_page = xtensa_map_page,
	.unmap_page = xtensa_unmap_page,
	.map_sg = xtensa_map_sg,
	.unmap_sg = xtensa_unmap_sg,
	.sync_single_for_cpu = xtensa_sync_single_for_cpu,
	.sync_single_for_device = xtensa_sync_single_for_device,
	.sync_sg_for_cpu = xtensa_sync_sg_for_cpu,
	.sync_sg_for_device = xtensa_sync_sg_for_device,
	.mapping_error = xtensa_dma_mapping_error,
};
EXPORT_SYMBOL(xtensa_dma_map_ops);

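/*
 * Preallocate entries for the DMA debugging facility
 * (CONFIG_DMA_API_DEBUG) early during boot.
 */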
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init xtensa_dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(xtensa_dma_init);