// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009-2010 PetaLogix
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/dma-debug.h>
#include <linux/export.h>
#include <linux/bug.h>

#define NOT_COHERENT_CACHE

static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag,
				       unsigned long attrs)
{
#ifdef NOT_COHERENT_CACHE
	return consistent_alloc(flag, size, dma_handle);
#else
	void *ret;
	struct page *page;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = virt_to_phys(ret);

	return ret;
#endif
}
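
/*
 * Usage sketch: drivers do not call dma_direct_alloc_coherent() directly;
 * they go through the generic DMA API, which dispatches into these ops.
 * "mydev" below is a hypothetical device pointer used only for illustration.
 *
 *	dma_addr_t handle;
 *	void *vaddr = dma_alloc_coherent(&mydev->dev, SZ_4K, &handle,
 *					 GFP_KERNEL);
 *	if (!vaddr)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(&mydev->dev, SZ_4K, vaddr, handle);
 */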

static void dma_direct_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle,
				     unsigned long attrs)
{
#ifdef NOT_COHERENT_CACHE
	consistent_free(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}

static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	/* FIXME: this part of the code is untested */
	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg);

		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
			continue;

		__dma_sync(sg_phys(sg), sg->length, direction);
	}

	return nents;
}
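
/*
 * Usage sketch: scatter-gather mappings land in dma_direct_map_sg() via the
 * generic dma_map_sg() wrapper. "mydev", "sgl" and "nents" are hypothetical.
 *
 *	int count = dma_map_sg(&mydev->dev, sgl, nents, DMA_TO_DEVICE);
 *	if (!count)
 *		return -EIO;
 *	... program the device using sg_dma_address()/sg_dma_len() ...
 *	dma_unmap_sg(&mydev->dev, sgl, nents, DMA_TO_DEVICE);
 */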

static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
	return 1;
}

static inline dma_addr_t dma_direct_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset,
					     size_t size,
					     enum dma_data_direction direction,
					     unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		__dma_sync(page_to_phys(page) + offset, size, direction);
	return page_to_phys(page) + offset;
}

static inline void dma_direct_unmap_page(struct device *dev,
					 dma_addr_t dma_address,
					 size_t size,
					 enum dma_data_direction direction,
					 unsigned long attrs)
{
	/* It is not necessary to do any cache cleanup here.
	 *
	 * phys_to_virt is here because __dma_sync_page uses __virt_to_phys
	 * and dma_address is a physical address.
	 */
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		__dma_sync(dma_address, size, direction);
}
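
/*
 * Usage sketch: single-buffer streaming mappings funnel into
 * dma_direct_map_page()/dma_direct_unmap_page() through dma_map_single()
 * or dma_map_page(). "mydev", "buf" and "len" are hypothetical.
 *
 *	dma_addr_t dma = dma_map_single(&mydev->dev, buf, len,
 *					DMA_FROM_DEVICE);
 *	if (dma_mapping_error(&mydev->dev, dma))
 *		return -EIO;
 *	... let the device write into the buffer ...
 *	dma_unmap_single(&mydev->dev, dma, len, DMA_FROM_DEVICE);
 */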

static inline void
dma_direct_sync_single_for_cpu(struct device *dev,
			       dma_addr_t dma_handle, size_t size,
			       enum dma_data_direction direction)
{
	/*
	 * It's pointless to flush the cache as the memory segment
	 * is given to the CPU
	 */

	if (direction == DMA_FROM_DEVICE)
		__dma_sync(dma_handle, size, direction);
}

static inline void
dma_direct_sync_single_for_device(struct device *dev,
				  dma_addr_t dma_handle, size_t size,
				  enum dma_data_direction direction)
{
	/*
	 * It's pointless to invalidate the cache if the device isn't
	 * supposed to write to the relevant region
	 */

	if (direction == DMA_TO_DEVICE)
		__dma_sync(dma_handle, size, direction);
}
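
/*
 * Usage sketch: when the CPU must inspect a streaming buffer that stays
 * mapped, the generic sync calls end up in the two helpers above.
 * Identifiers are hypothetical.
 *
 *	dma_sync_single_for_cpu(&mydev->dev, dma, len, DMA_FROM_DEVICE);
 *	... CPU reads the data the device wrote ...
 *	dma_sync_single_for_device(&mydev->dev, dma, len, DMA_FROM_DEVICE);
 */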

static inline void
dma_direct_sync_sg_for_cpu(struct device *dev,
			   struct scatterlist *sgl, int nents,
			   enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	/* FIXME: this part of the code is untested */
	if (direction == DMA_FROM_DEVICE)
		for_each_sg(sgl, sg, nents, i)
			__dma_sync(sg->dma_address, sg->length, direction);
}

static inline void
dma_direct_sync_sg_for_device(struct device *dev,
			      struct scatterlist *sgl, int nents,
			      enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	/* FIXME: this part of the code is untested */
	if (direction == DMA_TO_DEVICE)
		for_each_sg(sgl, sg, nents, i)
			__dma_sync(sg->dma_address, sg->length, direction);
}
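
/*
 * Usage sketch: the scatter-gather sync variants mirror the single-buffer
 * ones. Identifiers are hypothetical.
 *
 *	dma_sync_sg_for_cpu(&mydev->dev, sgl, nents, DMA_FROM_DEVICE);
 *	... CPU touches the buffers ...
 *	dma_sync_sg_for_device(&mydev->dev, sgl, nents, DMA_FROM_DEVICE);
 */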

static
int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
			     void *cpu_addr, dma_addr_t handle, size_t size,
			     unsigned long attrs)
{
#ifdef CONFIG_MMU
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;
	unsigned long pfn;

	if (off >= count || user_count > (count - off))
		return -ENXIO;

#ifdef NOT_COHERENT_CACHE
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	pfn = consistent_virt_to_pfn(cpu_addr);
#else
	pfn = virt_to_pfn(cpu_addr);
#endif
	return remap_pfn_range(vma, vma->vm_start, pfn + off,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
#else
	return -ENXIO;
#endif
}
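
/*
 * Usage sketch: a driver's .mmap file operation would reach
 * dma_direct_mmap_coherent() through dma_mmap_coherent(). "mydev", "vaddr"
 * and "handle" are hypothetical and assumed to come from an earlier
 * dma_alloc_coherent().
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return dma_mmap_coherent(&mydev->dev, vma, vaddr, handle,
 *					 vma->vm_end - vma->vm_start);
 *	}
 */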

const struct dma_map_ops dma_direct_ops = {
	.alloc = dma_direct_alloc_coherent,
	.free = dma_direct_free_coherent,
	.mmap = dma_direct_mmap_coherent,
	.map_sg = dma_direct_map_sg,
	.dma_supported = dma_direct_dma_supported,
	.map_page = dma_direct_map_page,
	.unmap_page = dma_direct_unmap_page,
	.sync_single_for_cpu = dma_direct_sync_single_for_cpu,
	.sync_single_for_device = dma_direct_sync_single_for_device,
	.sync_sg_for_cpu = dma_direct_sync_sg_for_cpu,
	.sync_sg_for_device = dma_direct_sync_sg_for_device,
};
EXPORT_SYMBOL(dma_direct_ops);
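
/*
 * Note: on this architecture the exported dma_direct_ops table is assumed
 * to be what get_arch_dma_ops() returns to the generic DMA API, so the
 * dma_*() calls sketched in the comments above all dispatch through it.
 */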

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(dma_init);