// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009-2010 PetaLogix
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/dma-debug.h>
#include <linux/export.h>
#include <linux/bug.h>

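/*
 * The cache is assumed not to be DMA-coherent here: defining
 * NOT_COHERENT_CACHE selects the uncached consistent_alloc()/
 * consistent_free() paths below instead of the plain page allocator.
 */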
#define NOT_COHERENT_CACHE

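/*
 * Allocate a coherent buffer: on a non-coherent cache this comes from an
 * uncached consistent mapping; otherwise plain zeroed pages are used and
 * the DMA handle is simply the physical address.
 */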
static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag,
				unsigned long attrs)
{
#ifdef NOT_COHERENT_CACHE
	return consistent_alloc(flag, size, dma_handle);
#else
	void *ret;
	struct page *page;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = virt_to_phys(ret);

	return ret;
#endif
}

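/* Free a buffer obtained from dma_direct_alloc_coherent() above. */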
static void dma_direct_free_coherent(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle,
				unsigned long attrs)
{
#ifdef NOT_COHERENT_CACHE
	consistent_free(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}

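/*
 * Map a scatterlist for DMA. There is no IOMMU, so the DMA address of
 * each segment is just its physical address; the cache is synchronized
 * per segment unless the caller asked to skip that.
 */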
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	/* FIXME: this part of the code is untested */
	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg);

		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
			continue;

		__dma_sync(sg_phys(sg), sg->length, direction);
	}

	return nents;
}

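/* No DMA addressing limitations are enforced here: any mask is accepted. */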
static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
	return 1;
}

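/*
 * Map a single page: again a 1:1 physical mapping, with an optional
 * cache sync before the buffer is handed to the device.
 */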
static inline dma_addr_t dma_direct_map_page(struct device *dev,
				struct page *page,
				unsigned long offset,
				size_t size,
				enum dma_data_direction direction,
				unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		__dma_sync(page_to_phys(page) + offset, size, direction);
	return page_to_phys(page) + offset;
}

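/* Undo dma_direct_map_page(): at most a cache sync is needed. */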
static inline void dma_direct_unmap_page(struct device *dev,
				dma_addr_t dma_address,
				size_t size,
				enum dma_data_direction direction,
				unsigned long attrs)
{
	/*
	 * No cache cleanup is necessary beyond a sync: there is no IOMMU
	 * mapping to tear down, and dma_address is already the physical
	 * address that __dma_sync() expects.
	 */
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		__dma_sync(dma_address, size, direction);
}

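/* Give ownership of a mapped region back to the CPU. */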
static inline void
dma_direct_sync_single_for_cpu(struct device *dev,
			       dma_addr_t dma_handle, size_t size,
			       enum dma_data_direction direction)
{
	/*
	 * Flushing (writing back) the cache is pointless once the buffer
	 * is handed to the CPU; only a sync for DMA_FROM_DEVICE is needed
	 * so the CPU sees the data the device wrote.
	 */
	if (direction == DMA_FROM_DEVICE)
		__dma_sync(dma_handle, size, direction);
}

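/* Hand ownership of a mapped region over to the device. */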
static inline void
dma_direct_sync_single_for_device(struct device *dev,
				  dma_addr_t dma_handle, size_t size,
				  enum dma_data_direction direction)
{
	/*
	 * It's pointless to invalidate the cache if the device isn't
	 * supposed to write to the relevant region.
	 */
	if (direction == DMA_TO_DEVICE)
		__dma_sync(dma_handle, size, direction);
}

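/* Scatterlist variant of dma_direct_sync_single_for_cpu(). */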
static inline void
dma_direct_sync_sg_for_cpu(struct device *dev,
			   struct scatterlist *sgl, int nents,
			   enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	/* FIXME: this part of the code is untested */
	if (direction == DMA_FROM_DEVICE)
		for_each_sg(sgl, sg, nents, i)
			__dma_sync(sg->dma_address, sg->length, direction);
}

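/* Scatterlist variant of dma_direct_sync_single_for_device(). */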
static inline void
dma_direct_sync_sg_for_device(struct device *dev,
			      struct scatterlist *sgl, int nents,
			      enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	/* FIXME: this part of the code is untested */
	if (direction == DMA_TO_DEVICE)
		for_each_sg(sgl, sg, nents, i)
			__dma_sync(sg->dma_address, sg->length, direction);
}

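/*
 * Map a coherent allocation into user space. On a non-coherent cache the
 * user mapping is made uncached so user space sees device writes.
 */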
static int dma_direct_mmap_coherent(struct device *dev,
				struct vm_area_struct *vma,
				void *cpu_addr, dma_addr_t handle,
				size_t size, unsigned long attrs)
{
#ifdef CONFIG_MMU
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;
	unsigned long pfn;

	if (off >= count || user_count > (count - off))
		return -ENXIO;

#ifdef NOT_COHERENT_CACHE
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	pfn = consistent_virt_to_pfn(cpu_addr);
#else
	pfn = virt_to_pfn(cpu_addr);
#endif
	return remap_pfn_range(vma, vma->vm_start, pfn + off,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
#else
	return -ENXIO;
#endif
}

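/*
 * The dma_map_ops instance exported for use as this platform's default
 * DMA backend; the generic DMA API dispatches to these callbacks.
 */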
const struct dma_map_ops dma_direct_ops = {
	.alloc = dma_direct_alloc_coherent,
	.free = dma_direct_free_coherent,
	.mmap = dma_direct_mmap_coherent,
	.map_sg = dma_direct_map_sg,
	.dma_supported = dma_direct_dma_supported,
	.map_page = dma_direct_map_page,
	.unmap_page = dma_direct_unmap_page,
	.sync_single_for_cpu = dma_direct_sync_single_for_cpu,
	.sync_single_for_device = dma_direct_sync_single_for_device,
	.sync_sg_for_cpu = dma_direct_sync_sg_for_cpu,
	.sync_sg_for_device = dma_direct_sync_sg_for_device,
};
EXPORT_SYMBOL(dma_direct_ops);
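/*
 * Illustrative sketch (not part of the original file): a driver sitting on
 * this bus reaches the callbacks above through the generic DMA API, e.g.:
 *
 *	dma_addr_t handle;
 *	void *buf = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, size, buf, handle);
 *
 * Streaming mappings go through dma_map_single()/dma_unmap_single(), which
 * end up in dma_direct_map_page()/dma_direct_unmap_page() above.
 */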

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

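/*
 * Register the preallocated dma-debug entries early in boot, before
 * drivers start mapping buffers.
 */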
static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(dma_init);