/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>

/* Generic DMA mapping functions: */

/*
 * Allocate what Linux calls "coherent" memory. On TILEPro this is
 * uncached memory; on TILE-Gx it is hash-for-home memory.
 */
#ifdef __tilepro__
#define PAGE_HOME_DMA PAGE_HOME_UNCACHED
#else
#define PAGE_HOME_DMA PAGE_HOME_HASH
#endif

void *dma_alloc_coherent(struct device *dev,
			 size_t size,
			 dma_addr_t *dma_handle,
			 gfp_t gfp)
{
	u64 dma_mask = dev->coherent_dma_mask ?: DMA_BIT_MASK(32);
	int node = dev_to_node(dev);
	int order = get_order(size);
	struct page *pg;
	dma_addr_t addr;

	gfp |= __GFP_ZERO;

	/*
	 * If the mask specifies that the memory be in the first 4 GB, then
	 * we force the allocation to come from the DMA zone. We also
	 * force the node to 0 since that's the only node where the DMA
	 * zone isn't empty. If the mask size is smaller than 32 bits, we
	 * may still not be able to guarantee a suitable memory address, in
	 * which case we will return NULL. But such devices are uncommon.
	 */
	if (dma_mask <= DMA_BIT_MASK(32)) {
		gfp |= GFP_DMA;
		node = 0;
	}

	pg = homecache_alloc_pages_node(node, gfp, order, PAGE_HOME_DMA);
	if (pg == NULL)
		return NULL;

	addr = page_to_phys(pg);
	if (addr + size > dma_mask) {
		__homecache_free_pages(pg, order);
		return NULL;
	}

	*dma_handle = addr;
	return page_address(pg);
}
EXPORT_SYMBOL(dma_alloc_coherent);

/*
 * Free memory that was allocated with dma_alloc_coherent.
 */
void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle)
{
	homecache_free_pages((unsigned long)vaddr, get_order(size));
}
EXPORT_SYMBOL(dma_free_coherent);
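/*
 * Example (hypothetical driver code, not part of this file): a typical
 * caller pairs dma_alloc_coherent() with dma_free_coherent(), handing
 * the returned dma_addr_t to the device and keeping the kernel virtual
 * address for the cpu. "dev", "RING_BYTES", and the ring-base register
 * are illustrative assumptions, not part of this API.
 *
 *	dma_addr_t ring_bus;
 *	void *ring = dma_alloc_coherent(dev, RING_BYTES, &ring_bus,
 *					GFP_KERNEL);
 *	if (ring == NULL)
 *		return -ENOMEM;
 *	... program "ring_bus" into the device's ring-base register ...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_bus);
 */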

/*
 * The map routines "map" the specified address range for DMA
 * accesses. The memory belongs to the device after this call is
 * issued, until it is unmapped with dma_unmap_single.
 *
 * We don't need to do any mapping; we just flush the address range
 * out of the cache and return a DMA address.
 *
 * The unmap routines do whatever is necessary before the processor
 * accesses the memory again, and must be called before the driver
 * touches the memory. We can get away with a cache invalidate if we
 * can count on nothing having been touched.
 */

/* Set up a single page for DMA access. */
static void __dma_prep_page(struct page *page, unsigned long offset,
			    size_t size, enum dma_data_direction direction)
{
	/*
	 * Flush the page from cache if necessary.
	 * On tilegx, data is delivered to hash-for-home L3; on tilepro,
	 * data is delivered direct to memory.
	 *
	 * NOTE: If we were just doing DMA_TO_DEVICE we could optimize
	 * this to be a "flush" not a "finv" and keep some of the
	 * state in cache across the DMA operation, but it doesn't seem
	 * worth creating the necessary flush_buffer_xxx() infrastructure.
	 */
	int home = page_home(page);
	switch (home) {
	case PAGE_HOME_HASH:
#ifdef __tilegx__
		return;
#endif
		break;
	case PAGE_HOME_UNCACHED:
#ifdef __tilepro__
		return;
#endif
		break;
	case PAGE_HOME_IMMUTABLE:
		/* Should be going to the device only. */
		BUG_ON(direction == DMA_FROM_DEVICE ||
		       direction == DMA_BIDIRECTIONAL);
		return;
	case PAGE_HOME_INCOHERENT:
		/* Incoherent anyway, so no need to work hard here. */
		return;
	default:
		BUG_ON(home < 0 || home >= NR_CPUS);
		break;
	}
	homecache_finv_page(page);

#ifdef DEBUG_ALIGNMENT
	/* Warn if the region isn't cacheline aligned. */
	if (offset & (L2_CACHE_BYTES - 1) || (size & (L2_CACHE_BYTES - 1)))
		pr_warn("Unaligned DMA to non-hfh memory: PA %#llx/%#lx\n",
			PFN_PHYS(page_to_pfn(page)) + offset, size);
#endif
}

/* Make the page ready to be read by the core. */
static void __dma_complete_page(struct page *page, unsigned long offset,
				size_t size, enum dma_data_direction direction)
{
#ifdef __tilegx__
	switch (page_home(page)) {
	case PAGE_HOME_HASH:
		/* I/O device delivered data the way the cpu wanted it. */
		break;
	case PAGE_HOME_INCOHERENT:
		/* Incoherent anyway, so no need to work hard here. */
		break;
	case PAGE_HOME_IMMUTABLE:
		/* Extra read-only copies are not a problem. */
		break;
	default:
		/* Flush the bogus hash-for-home I/O entries to memory. */
		homecache_finv_map_page(page, PAGE_HOME_HASH);
		break;
	}
#endif
}

static void __dma_prep_pa_range(dma_addr_t dma_addr, size_t size,
				enum dma_data_direction direction)
{
	struct page *page = pfn_to_page(PFN_DOWN(dma_addr));
	unsigned long offset = dma_addr & (PAGE_SIZE - 1);
	size_t bytes = min(size, (size_t)(PAGE_SIZE - offset));

	while (size != 0) {
		__dma_prep_page(page, offset, bytes, direction);
		size -= bytes;
		++page;
		offset = 0;
		bytes = min((size_t)PAGE_SIZE, size);
	}
}

static void __dma_complete_pa_range(dma_addr_t dma_addr, size_t size,
				    enum dma_data_direction direction)
{
	struct page *page = pfn_to_page(PFN_DOWN(dma_addr));
	unsigned long offset = dma_addr & (PAGE_SIZE - 1);
	size_t bytes = min(size, (size_t)(PAGE_SIZE - offset));

	while (size != 0) {
		__dma_complete_page(page, offset, bytes, direction);
		size -= bytes;
		++page;
		offset = 0;
		bytes = min((size_t)PAGE_SIZE, size);
	}
}


/*
 * dma_map_single can be passed any memory address, and there appear
 * to be no alignment constraints.
 *
 * There is a chance that the start of the buffer will share a cache
 * line with some other data that has been touched in the meantime.
 */
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
			  enum dma_data_direction direction)
{
	dma_addr_t dma_addr = __pa(ptr);

	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(size == 0);

	__dma_prep_pa_range(dma_addr, size, direction);

	return dma_addr;
}
EXPORT_SYMBOL(dma_map_single);

void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		      enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
	__dma_complete_pa_range(dma_addr, size, direction);
}
EXPORT_SYMBOL(dma_unmap_single);
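
/*
 * Example (hypothetical driver code, not part of this file): streaming
 * DMA of a kmalloc'ed buffer out to a device. "dev", "buf", and "len"
 * are assumed to come from the caller; on this platform the map cannot
 * fail, since it just flushes caches and returns __pa(buf).
 *
 *	dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	... tell the device to read "len" bytes from "bus" and wait ...
 *	dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
 *	... now the cpu may touch "buf" again ...
 */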

int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
	       enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(direction));

	WARN_ON(nents == 0 || sglist->length == 0);

	for_each_sg(sglist, sg, nents, i) {
		sg->dma_address = sg_phys(sg);
		__dma_prep_pa_range(sg->dma_address, sg->length, direction);
	}

	return nents;
}
EXPORT_SYMBOL(dma_map_sg);

void dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
		  enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(direction));
	for_each_sg(sglist, sg, nents, i) {
		sg->dma_address = sg_phys(sg);
		__dma_complete_pa_range(sg->dma_address, sg->length,
					direction);
	}
}
EXPORT_SYMBOL(dma_unmap_sg);
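
/*
 * Example (hypothetical driver code, not part of this file): mapping a
 * scatter-gather list for a device read. "sgl" and "nents" are assumed
 * to have been set up already (e.g. via sg_init_table()/sg_set_page());
 * note that dma_unmap_sg() takes the original nents, not the count
 * returned by dma_map_sg().
 *
 *	struct scatterlist *sg;
 *	int i, count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *
 *	for_each_sg(sgl, sg, count, i)
 *		... queue sg_dma_address(sg) / sg_dma_len(sg) ...
 *	... start the device and wait for completion ...
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */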

dma_addr_t dma_map_page(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));

	BUG_ON(offset + size > PAGE_SIZE);
	__dma_prep_page(page, offset, size, direction);
	return page_to_pa(page) + offset;
}
EXPORT_SYMBOL(dma_map_page);

void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
		    enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
	__dma_complete_page(pfn_to_page(PFN_DOWN(dma_address)),
			    dma_address & (PAGE_SIZE - 1), size, direction);
}
EXPORT_SYMBOL(dma_unmap_page);
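
/*
 * Example (hypothetical driver code, not part of this file): mapping a
 * sub-page region, e.g. a fragment of a network page. "frag_off" and
 * "frag_len" are illustrative; dma_map_page() BUGs if offset + size
 * crosses the end of the page.
 *
 *	dma_addr_t bus = dma_map_page(dev, page, frag_off, frag_len,
 *				      DMA_TO_DEVICE);
 *	... device reads "frag_len" bytes from "bus" ...
 *	dma_unmap_page(dev, bus, frag_len, DMA_TO_DEVICE);
 */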

void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
			     size_t size, enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
	__dma_complete_pa_range(dma_handle, size, direction);
}
EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
				size_t size, enum dma_data_direction direction)
{
	__dma_prep_pa_range(dma_handle, size, direction);
}
EXPORT_SYMBOL(dma_sync_single_for_device);
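
/*
 * Example (hypothetical driver code, not part of this file): reusing a
 * long-lived streaming mapping across repeated transfers. The buffer
 * stays mapped; ownership bounces between device and cpu through the
 * sync calls rather than by remapping each time.
 *
 *	dma_addr_t bus = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *
 *	while (more_work) {
 *		... start a device write to "bus" and wait for it ...
 *		dma_sync_single_for_cpu(dev, bus, len, DMA_FROM_DEVICE);
 *		... cpu examines the freshly DMA'ed data in "buf" ...
 *		dma_sync_single_for_device(dev, bus, len, DMA_FROM_DEVICE);
 *	}
 *	dma_unmap_single(dev, bus, len, DMA_FROM_DEVICE);
 */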

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(nelems == 0 || sglist->length == 0);

	for_each_sg(sglist, sg, nelems, i) {
		dma_sync_single_for_cpu(dev, sg->dma_address,
					sg_dma_len(sg), direction);
	}
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(nelems == 0 || sglist->length == 0);

	for_each_sg(sglist, sg, nelems, i) {
		dma_sync_single_for_device(dev, sg->dma_address,
					   sg_dma_len(sg), direction);
	}
}
EXPORT_SYMBOL(dma_sync_sg_for_device);

void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
				   unsigned long offset, size_t size,
				   enum dma_data_direction direction)
{
	dma_sync_single_for_cpu(dev, dma_handle + offset, size, direction);
}
EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev,
				      dma_addr_t dma_handle,
				      unsigned long offset, size_t size,
				      enum dma_data_direction direction)
{
	dma_sync_single_for_device(dev, dma_handle + offset, size, direction);
}
EXPORT_SYMBOL(dma_sync_single_range_for_device);

/*
 * dma_alloc_noncoherent() is #defined to return coherent memory,
 * so there's no need to do any flushing here.
 */
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		    enum dma_data_direction direction)
{
}
EXPORT_SYMBOL(dma_cache_sync);