/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
 * DMA Coherent API Notes
 *
 * I/O is inherently non-coherent on ARC. So a coherent DMA buffer is
 * implemented by accessing it using a kernel virtual address, with
 * the Cache bit off in the TLB entry.
 *
 * The default DMA address == physical address, which is 0x8000_0000 based.
 */
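
/*
 * Usage sketch (illustrative only, not part of this file): drivers reach
 * arc_dma_alloc()/arc_dma_free() below through the generic DMA API, e.g.
 *
 *	dma_addr_t handle;
 *	void *buf = dma_alloc_coherent(dev, SZ_4K, &handle, GFP_KERNEL);
 *
 *	if (buf) {
 *		... device DMAs to/from 'handle', CPU accesses 'buf' ...
 *		dma_free_coherent(dev, SZ_4K, buf, handle);
 *	}
 */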

#include <linux/dma-mapping.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>

static void *arc_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	unsigned long order = get_order(size);
	struct page *page;
	phys_addr_t paddr;
	void *kvaddr;
	int need_coh = 1, need_kvaddr = 0;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * IOC relies on all data (even coherent DMA data) being in cache,
	 * thus allocate normal cached memory.
	 *
	 * The gains with IOC are two pronged:
	 * - For streaming data, it elides the need for cache maintenance,
	 *   saving cycles in the flush code and bus bandwidth, as otherwise
	 *   all the lines of a buffer would need to be flushed out to memory.
	 * - For coherent data, reads/writes to buffers terminate early in the
	 *   cache (vs. always going to memory), and thus are faster.
	 */
	if ((is_isa_arcv2() && ioc_enable) ||
	    (attrs & DMA_ATTR_NON_CONSISTENT))
		need_coh = 0;

	/*
	 * - A coherent buffer needs an MMU mapping to enforce non-cacheability
	 * - A highmem page needs a virtual handle (hence an MMU mapping),
	 *   independent of cacheability
	 */
	if (PageHighMem(page) || need_coh)
		need_kvaddr = 1;

	/* This is linear addr (0x8000_0000 based) */
	paddr = page_to_phys(page);

	*dma_handle = plat_phys_to_dma(dev, paddr);

	/* This is kernel Virtual address (0x7000_0000 based) */
	if (need_kvaddr) {
		kvaddr = ioremap_nocache(paddr, size);
		if (kvaddr == NULL) {
			__free_pages(page, order);
			return NULL;
		}
	} else {
		kvaddr = (void *)(u32)paddr;
	}

	/*
	 * Evict any existing L1 and/or L2 lines for the backing page
	 * in case it was used earlier as a normal "cached" page.
	 * Yeah this bit us - STAR 9000898266
	 *
	 * Although the core does call flush_cache_vmap(), it gets the kvaddr,
	 * hence can't be used to efficiently flush the L1 and/or L2, which
	 * need the paddr. Currently flush_cache_vmap() nukes the L1 cache
	 * completely; this will be optimized in a separate commit.
	 */
	if (need_coh)
		dma_cache_wback_inv(paddr, size);

	return kvaddr;
}

static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	phys_addr_t paddr = plat_dma_to_phys(dev, dma_handle);
	struct page *page = virt_to_page(paddr);
	int is_non_coh;

	is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT) ||
			(is_isa_arcv2() && ioc_enable);

	if (PageHighMem(page) || !is_non_coh)
		iounmap((void __force __iomem *)vaddr);

	__free_pages(page, get_order(size));
}

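/*
 * Illustrative sketch (assumed typical usage, not from this file): a
 * driver's ->mmap file operation would typically reach arc_dma_mmap()
 * below through the generic helper, e.g.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *fd = file->private_data;   (hypothetical)
 *
 *		return dma_mmap_coherent(fd->dev, vma, fd->cpu_addr,
 *					 fd->dma_handle, fd->size);
 *	}
 */
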
static int arc_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			unsigned long attrs)
{
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = __phys_to_pfn(plat_dma_to_phys(dev, dma_addr));
	unsigned long off = vma->vm_pgoff;
	int ret = -ENXIO;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < count && user_count <= (count - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      user_count << PAGE_SHIFT,
				      vma->vm_page_prot);
	}

	return ret;
}

/*
 * streaming DMA Mapping API...
 * The CPU accesses the page via its normal paddr, so it must be explicitly
 * made consistent before each use.
 */
static void _dma_cache_sync(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_FROM_DEVICE:
		dma_cache_inv(paddr, size);
		break;
	case DMA_TO_DEVICE:
		dma_cache_wback(paddr, size);
		break;
	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(paddr, size);
		break;
	default:
		pr_err("Invalid DMA dir [%d] for OP @ %pa[p]\n", dir, &paddr);
	}
}

static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t paddr = page_to_phys(page) + offset;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		_dma_cache_sync(paddr, size, dir);

	return plat_phys_to_dma(dev, paddr);
}
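
/*
 * Hedged sketch (typical streaming usage, not part of this file): for a
 * device-to-memory transfer a driver would do something like
 *
 *	dma_addr_t d = dma_map_page(dev, page, 0, len, DMA_FROM_DEVICE);
 *
 *	if (dma_mapping_error(dev, d))
 *		return -ENOMEM;
 *	... start DMA, wait for completion ...
 *	dma_unmap_page(dev, d, len, DMA_FROM_DEVICE);
 *
 * Note that dma_unmap_page() ends up a no-op with these ops (there is no
 * .unmap_page callback below); the cache maintenance at map time suffices.
 */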

static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s;
	int i;

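	/*
	 * Each segment goes through the generic dma_map_page() wrapper,
	 * which lands back in arc_dma_map_page() above, so the per-segment
	 * CPU cache maintenance happens there.
	 */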
	for_each_sg(sg, s, nents, i)
		s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
					      s->length, dir);

	return nents;
}

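/*
 * Note: the passed-in 'dir' is ignored in the two helpers below; for_cpu
 * always invalidates (DMA_FROM_DEVICE) and for_device always writes back
 * (DMA_TO_DEVICE), which is the cache maintenance the CPU side needs in
 * each case.
 */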
static void arc_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	_dma_cache_sync(plat_dma_to_phys(dev, dma_handle), size, DMA_FROM_DEVICE);
}

static void arc_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	_dma_cache_sync(plat_dma_to_phys(dev, dma_handle), size, DMA_TO_DEVICE);
}

static void arc_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sglist, int nelems,
		enum dma_data_direction dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i)
		_dma_cache_sync(sg_phys(sg), sg->length, dir);
}

static void arc_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sglist, int nelems,
		enum dma_data_direction dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i)
		_dma_cache_sync(sg_phys(sg), sg->length, dir);
}

static int arc_dma_supported(struct device *dev, u64 dma_mask)
{
	/* Support 32 bit DMA mask exclusively */
	return dma_mask == DMA_BIT_MASK(32);
}
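
/*
 * Illustrative only (assumed driver code, not from this file): the check
 * above is what dma_set_mask() consults, so requesting a 32-bit mask
 * succeeds and anything else fails:
 *
 *	if (dma_set_mask(dev, DMA_BIT_MASK(32)))
 *		dev_err(dev, "no usable DMA configuration\n");
 */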

const struct dma_map_ops arc_dma_ops = {
	.alloc = arc_dma_alloc,
	.free = arc_dma_free,
	.mmap = arc_dma_mmap,
	.map_page = arc_dma_map_page,
	.map_sg = arc_dma_map_sg,
	.sync_single_for_device = arc_dma_sync_single_for_device,
	.sync_single_for_cpu = arc_dma_sync_single_for_cpu,
	.sync_sg_for_cpu = arc_dma_sync_sg_for_cpu,
	.sync_sg_for_device = arc_dma_sync_sg_for_device,
	.dma_supported = arc_dma_supported,
};
EXPORT_SYMBOL(arc_dma_ops);