#ifndef _ASM_DMA_MAPPING_H_
#define _ASM_DMA_MAPPING_H_

/*
 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
 * documentation.
 */

#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/swiotlb.h>

extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device fallback_dev;
extern int panic_on_overflow;

struct dma_mapping_ops {
	int		(*mapping_error)(dma_addr_t dma_addr);
	void		*(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t gfp);
	void		(*free_coherent)(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
	dma_addr_t	(*map_single)(struct device *hwdev, phys_addr_t ptr,
				size_t size, int direction);
	/* like map_single, but doesn't check the device mask */
	dma_addr_t	(*map_simple)(struct device *hwdev, phys_addr_t ptr,
				size_t size, int direction);
	void		(*unmap_single)(struct device *dev, dma_addr_t addr,
				size_t size, int direction);
	void		(*sync_single_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, size_t size,
				int direction);
	void		(*sync_single_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, size_t size,
				int direction);
	void		(*sync_single_range_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size, int direction);
	void		(*sync_single_range_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size, int direction);
	void		(*sync_sg_for_cpu)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	void		(*sync_sg_for_device)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	int		(*map_sg)(struct device *hwdev, struct scatterlist *sg,
				int nents, int direction);
	void		(*unmap_sg)(struct device *hwdev,
				struct scatterlist *sg, int nents,
				int direction);
	int		(*dma_supported)(struct device *hwdev, u64 mask);
	int		is_phys;
};

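/*
 * Illustrative sketch only (not part of this header): a DMA backend such as
 * gart, swiotlb or nommu provides its own dma_mapping_ops instance and points
 * dma_ops (declared below) at it during init.  The names here are made up:
 *
 *	static const struct dma_mapping_ops example_dma_ops = {
 *		.mapping_error	= example_mapping_error,
 *		.map_single	= example_map_single,
 *		.unmap_single	= example_unmap_single,
 *		.map_sg		= example_map_sg,
 *		.dma_supported	= example_dma_supported,
 *		.is_phys	= 0,
 *	};
 *
 *	dma_ops = &example_dma_ops;
 */
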
extern const struct dma_mapping_ops *dma_ops;

static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	if (dma_ops->mapping_error)
		return dma_ops->mapping_error(dma_addr);

	return (dma_addr == bad_dma_address);
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle);

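/*
 * Minimal coherent-allocation sketch (illustrative only; the device, size and
 * error handling are hypothetical):
 *
 *	void *cpu_addr;
 *	dma_addr_t bus_addr;
 *
 *	cpu_addr = dma_alloc_coherent(dev, 4096, &bus_addr, GFP_KERNEL);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	... give bus_addr to the hardware, use cpu_addr from the CPU ...
 *	dma_free_coherent(dev, 4096, cpu_addr, bus_addr);
 */
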
extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);

static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
	       int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	return dma_ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
		 int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->unmap_single)
		dma_ops->unmap_single(dev, addr, size, direction);
}

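/*
 * Streaming-mapping sketch (illustrative only; dev, buf and len are
 * hypothetical):
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(handle))
 *		return -EIO;
 *	... hand handle to the hardware, wait for the transfer ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */
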
static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
	   int nents, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	return dma_ops->map_sg(hwdev, sg, nents, direction);
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	     int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->unmap_sg)
		dma_ops->unmap_sg(hwdev, sg, nents, direction);
}

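/*
 * Scatter-gather sketch (illustrative only): an IOMMU backend may coalesce
 * entries, so the driver programs the hardware with the returned count, but
 * unmaps with the original nents it passed in:
 *
 *	int count = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *	... use the first count entries of sglist ...
 *	dma_unmap_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 */
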
static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			size_t size, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_for_cpu)
		dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
					     direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
			   size_t size, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_for_device)
		dma_ops->sync_single_for_device(hwdev, dma_handle, size,
						direction);
	flush_write_buffers();
}

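/*
 * Sync sketch (illustrative only): a buffer that stays mapped across several
 * transfers is handed back and forth between CPU and device with the sync
 * calls instead of being remapped each time:
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... CPU inspects the received data ...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 */
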
static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_range_for_cpu)
		dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
						   size, direction);

	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_range_for_device)
		dma_ops->sync_single_range_for_device(hwdev, dma_handle,
						      offset, size, direction);

	flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
		    int nelems, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_sg_for_cpu)
		dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
		       int nelems, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_sg_for_device)
		dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);

	flush_write_buffers();
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	return dma_ops->map_single(dev, page_to_phys(page) + offset,
				   size, direction);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, int direction)
{
	dma_unmap_single(dev, addr, size, direction);
}

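/*
 * dma_map_page()/dma_unmap_page() cover buffers described as a page plus
 * offset (e.g. highmem pages without a permanent kernel mapping on 32-bit);
 * on x86 they reduce to the map_single operation above via page_to_phys().
 */
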
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction dir)
{
	flush_write_buffers();
}

static inline int dma_get_cache_alignment(void)
{
	/*
	 * No easy way to get cache size on all x86, so return the
	 * maximum possible, to be safe.
	 */
	return boot_cpu_data.x86_clflush_size;
}

#define dma_is_consistent(d, h)	(1)

#ifdef CONFIG_X86_32
# define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
extern int
dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
			    dma_addr_t device_addr, size_t size, int flags);

extern void
dma_release_declared_memory(struct device *dev);

extern void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size);
extern int forbid_dac;
#endif /* CONFIG_X86_32 */
#endif