/*
 * include/asm-xtensa/dma-mapping.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_DMA_MAPPING_H
#define _XTENSA_DMA_MAPPING_H

#include <asm/cache.h>
#include <asm/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>

#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)

/*
 * DMA-consistent mapping functions.
 */

extern void *consistent_alloc(int, size_t, dma_addr_t, unsigned long);
extern void consistent_free(void *, size_t, dma_addr_t);
extern void consistent_sync(void *, size_t, int);
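
/*
 * Non-coherent allocations are not implemented separately on this port:
 * the noncoherent helpers simply alias the coherent ones.
 */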
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle);
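
/*
 * Streaming mappings: the buffer is synced in the CPU cache and its
 * physical address is handed out directly as the bus address (the code
 * below assumes a 1:1 physical-to-bus mapping).
 */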
static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	consistent_sync(ptr, size, direction);
	return virt_to_phys(ptr);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}
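
/*
 * Map a scatterlist for DMA: each entry's physical address becomes its
 * DMA address and its buffer is synced for the transfer.
 */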
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
	   enum dma_data_direction direction)
{
	int i;
	struct scatterlist *sg;

	BUG_ON(direction == DMA_NONE);

	for_each_sg(sglist, sg, nents, i) {
		BUG_ON(!sg_page(sg));

		sg->dma_address = sg_phys(sg);
		consistent_sync(sg_virt(sg), sg->length, direction);
	}

	return nents;
}
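
/*
 * Page mappings are computed straight from the page frame number; note
 * that, unlike dma_map_single(), no cache maintenance is performed here.
 */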
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
	     size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	return (dma_addr_t)(page_to_pfn(page)) * PAGE_SIZE + offset;
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	     enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}
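
/*
 * CPU/device ownership transfers: both directions perform the same cache
 * sync on the kernel virtual address recovered with bus_to_virt().
 */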
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
	consistent_sync((void *)bus_to_virt(dma_handle), size, direction);
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction direction)
{
	consistent_sync((void *)bus_to_virt(dma_handle), size, direction);
}

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction direction)
{
	consistent_sync((void *)bus_to_virt(dma_handle) + offset, size,
			direction);
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction direction)
{
	consistent_sync((void *)bus_to_virt(dma_handle) + offset, size,
			direction);
}
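
/*
 * Scatterlist sync variants: walk the list and sync each entry's kernel
 * virtual buffer.
 */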
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nelems,
		    enum dma_data_direction dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i)
		consistent_sync(sg_virt(sg), sg->length, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
		       int nelems, enum dma_data_direction dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i)
		consistent_sync(sg_virt(sg), sg->length, dir);
}
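
/*
 * With the identity physical/bus mapping used above, a mapping can never
 * fail and any DMA mask is accepted.
 */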
static inline int
dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}

static inline int
dma_supported(struct device *dev, u64 mask)
{
	return 1;
}

static inline int
dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
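
/*
 * Sync a buffer identified by its kernel virtual address rather than by
 * a DMA handle.
 */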
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction direction)
{
	consistent_sync(vaddr, size, direction);
}

/* Not supported for now */
static inline int dma_mmap_coherent(struct device *dev,
				    struct vm_area_struct *vma, void *cpu_addr,
				    dma_addr_t dma_addr, size_t size)
{
	return -EINVAL;
}

static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
				  void *cpu_addr, dma_addr_t dma_addr,
				  size_t size)
{
	return -EINVAL;
}
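
/*
 * The attrs-based allocation interface is not wired up either; these are
 * stub implementations (allocation always returns NULL).
 */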
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    struct dma_attrs *attrs)
{
	return NULL;
}

static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *vaddr, dma_addr_t dma_handle,
				  struct dma_attrs *attrs)
{
}

#endif	/* _XTENSA_DMA_MAPPING_H */