#ifndef _ASM_DMA_MAPPING_H_
#define _ASM_DMA_MAPPING_H_

/*
 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
 * documentation.
 */

#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/swiotlb.h>

extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device fallback_dev;
extern int panic_on_overflow;
extern int force_iommu;

struct dma_mapping_ops {
        int (*mapping_error)(dma_addr_t dma_addr);
        void* (*alloc_coherent)(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t gfp);
        void (*free_coherent)(struct device *dev, size_t size,
                              void *vaddr, dma_addr_t dma_handle);
        dma_addr_t (*map_single)(struct device *hwdev, phys_addr_t ptr,
                                 size_t size, int direction);
        /* like map_single, but doesn't check the device mask */
        dma_addr_t (*map_simple)(struct device *hwdev, phys_addr_t ptr,
                                 size_t size, int direction);
        void (*unmap_single)(struct device *dev, dma_addr_t addr,
                             size_t size, int direction);
        void (*sync_single_for_cpu)(struct device *hwdev,
                                    dma_addr_t dma_handle, size_t size,
                                    int direction);
        void (*sync_single_for_device)(struct device *hwdev,
                                       dma_addr_t dma_handle, size_t size,
                                       int direction);
        void (*sync_single_range_for_cpu)(struct device *hwdev,
                                          dma_addr_t dma_handle, unsigned long offset,
                                          size_t size, int direction);
        void (*sync_single_range_for_device)(struct device *hwdev,
                                             dma_addr_t dma_handle, unsigned long offset,
                                             size_t size, int direction);
        void (*sync_sg_for_cpu)(struct device *hwdev,
                                struct scatterlist *sg, int nelems,
                                int direction);
        void (*sync_sg_for_device)(struct device *hwdev,
                                   struct scatterlist *sg, int nelems,
                                   int direction);
        int (*map_sg)(struct device *hwdev, struct scatterlist *sg,
                      int nents, int direction);
        void (*unmap_sg)(struct device *hwdev,
                         struct scatterlist *sg, int nents,
                         int direction);
        int (*dma_supported)(struct device *hwdev, u64 mask);
        int is_phys;
};

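/*
 * Illustrative sketch only (not part of this interface): a DMA backend is
 * expected to fill in a dma_mapping_ops table and point the global dma_ops
 * at it, much as the nommu, GART and swiotlb implementations do.  The names
 * example_map_single and example_dma_ops below are hypothetical.
 *
 *	static dma_addr_t example_map_single(struct device *hwdev,
 *					     phys_addr_t paddr,
 *					     size_t size, int direction)
 *	{
 *		return paddr;			// identity mapping, no IOMMU
 *	}
 *
 *	static const struct dma_mapping_ops example_dma_ops = {
 *		.map_single	= example_map_single,
 *		.is_phys	= 1,
 *	};
 *
 *	dma_ops = &example_dma_ops;		// done once at boot
 */
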
extern const struct dma_mapping_ops *dma_ops;

static inline int dma_mapping_error(dma_addr_t dma_addr)
{
        if (dma_ops->mapping_error)
                return dma_ops->mapping_error(dma_addr);

        return (dma_addr == bad_dma_address);
}

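/*
 * Illustrative sketch only: a driver should check the result of a streaming
 * mapping with dma_mapping_error() before handing it to hardware.  The names
 * dev, buf and len are hypothetical.
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(handle))
 *		goto err;			// mapping failed, back off
 */
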
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

void *dma_alloc_coherent(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
                       void *vaddr, dma_addr_t dma_handle);

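/*
 * Illustrative sketch only: a consistent (coherent) buffer is allocated once
 * and stays mapped for the lifetime of the device, e.g. for a descriptor
 * ring.  The names dev, ring and ring_dma are hypothetical.
 *
 *	void *ring;
 *	dma_addr_t ring_dma;
 *
 *	ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
 */
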
extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);

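/*
 * Illustrative sketch only: at probe time a driver negotiates its addressing
 * capability before doing any mapping.  DMA_32BIT_MASK comes from
 * <linux/dma-mapping.h>; dev is hypothetical.
 *
 *	if (dma_set_mask(dev, DMA_32BIT_MASK))
 *		return -EIO;		// device cannot do 32-bit DMA
 */
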
static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
               int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        return dma_ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
                 int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->unmap_single)
                dma_ops->unmap_single(dev, addr, size, direction);
}

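/*
 * Illustrative sketch only: a streaming mapping covers one DMA transfer; the
 * CPU must not touch the buffer between map and unmap unless the sync
 * helpers below are used.  dev, buf and len are hypothetical.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(handle))
 *		goto err;
 *	... point the hardware at handle and start the transfer ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */
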
static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
           int nents, int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        return dma_ops->map_sg(hwdev, sg, nents, direction);
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
             int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->unmap_sg)
                dma_ops->unmap_sg(hwdev, sg, nents, direction);
}

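/*
 * Illustrative sketch only: mapping a scatterlist may coalesce entries, so
 * the hardware must be programmed from the returned count and the
 * sg_dma_address()/sg_dma_len() accessors, not from the original nents
 * (which is still what dma_unmap_sg() takes).  The names sglist, mapped and
 * program_hw_entry are hypothetical.
 *
 *	int i, mapped;
 *	struct scatterlist *sg;
 *
 *	mapped = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *	if (!mapped)
 *		goto err;
 *	for_each_sg(sglist, sg, mapped, i)
 *		program_hw_entry(sg_dma_address(sg), sg_dma_len(sg));
 *	...
 *	dma_unmap_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 */
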
static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                        size_t size, int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_single_for_cpu)
                dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
                                             direction);
        flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
                           size_t size, int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_single_for_device)
                dma_ops->sync_single_for_device(hwdev, dma_handle, size,
                                                direction);
        flush_write_buffers();
}

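/*
 * Illustrative sketch only: if the CPU needs to look at a buffer that is
 * still mapped for streaming DMA, it must bracket the access with the sync
 * calls; ownership goes back to the device with the ..._for_device() call.
 * dev, handle and len are hypothetical.
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... the CPU may now read the buffer contents ...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 */
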
static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size, int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_single_range_for_cpu)
                dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
                                                   size, direction);

        flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size,
                                 int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_single_range_for_device)
                dma_ops->sync_single_range_for_device(hwdev, dma_handle,
                                                      offset, size, direction);

        flush_write_buffers();
}

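/*
 * Illustrative sketch only: the range variants let a driver sync just the
 * part of a mapping the device actually touched, e.g. a short receive
 * packet at the start of a large buffer.  dev, handle and rx_len are
 * hypothetical.
 *
 *	dma_sync_single_range_for_cpu(dev, handle, 0, rx_len,
 *				      DMA_FROM_DEVICE);
 *	... inspect the first rx_len bytes ...
 *	dma_sync_single_range_for_device(dev, handle, 0, rx_len,
 *					 DMA_FROM_DEVICE);
 */
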
static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                    int nelems, int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_sg_for_cpu)
                dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                       int nelems, int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_sg_for_device)
                dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);

        flush_write_buffers();
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                                      size_t offset, size_t size,
                                      int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        return dma_ops->map_single(dev, page_to_phys(page) + offset,
                                   size, direction);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
                                  size_t size, int direction)
{
        dma_unmap_single(dev, addr, size, direction);
}

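/*
 * Illustrative sketch only: dma_map_page() is the page/offset form of
 * dma_map_single(), useful when only a struct page is at hand (e.g. a
 * page-cache page).  dev, page, off and len are hypothetical.
 *
 *	dma_addr_t handle = dma_map_page(dev, page, off, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(handle))
 *		goto err;
 *	...
 *	dma_unmap_page(dev, handle, len, DMA_TO_DEVICE);
 */
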
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction dir)
{
        flush_write_buffers();
}

static inline int dma_get_cache_alignment(void)
{
        /* no easy way to get cache size on all x86, so return the
         * maximum possible, to be safe */
        return boot_cpu_data.x86_clflush_size;
}

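/*
 * Illustrative sketch only: a driver that packs several DMA buffers into one
 * allocation can use dma_get_cache_alignment() to keep them from sharing a
 * cache line.  buf_size and stride are hypothetical.
 *
 *	size_t stride = ALIGN(buf_size, dma_get_cache_alignment());
 */
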
#define dma_is_consistent(d, h) (1)

#ifdef CONFIG_X86_32
# define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
struct dma_coherent_mem {
        void *virt_base;
        u32 device_base;
        int size;
        int flags;
        unsigned long *bitmap;
};

extern int
dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
                            dma_addr_t device_addr, size_t size, int flags);

extern void
dma_release_declared_memory(struct device *dev);

extern void *
dma_mark_declared_memory_occupied(struct device *dev,
                                  dma_addr_t device_addr, size_t size);
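
/*
 * Illustrative sketch only: a driver for a device with its own on-board
 * memory window can hand that region to the DMA API so that
 * dma_alloc_coherent() is satisfied from it.  bus_addr, device_addr and
 * size are hypothetical, and the call returns nonzero on success.
 *
 *	if (!dma_declare_coherent_memory(dev, bus_addr, device_addr,
 *					 size, DMA_MEMORY_MAP))
 *		goto no_coherent_mem;
 *	...
 *	dma_release_declared_memory(dev);
 */
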
235#endif /* CONFIG_X86_32 */
Glauber Costa6f536632008-03-25 18:36:20 -0300236#endif