#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>

#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>

#ifdef __arch_page_to_dma
#error Please update to __arch_pfn_to_dma
#endif

/*
 * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
 * functions used internally by the DMA-mapping API to provide DMA
 * addresses. They must not be used by drivers.
 */
#ifndef __arch_pfn_to_dma
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_bus(pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __bus_to_pfn(addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return (void *)__bus_to_virt(addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
#else
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return __arch_pfn_to_dma(dev, pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_pfn(dev, addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return __arch_virt_to_dma(dev, addr);
}
#endif

/*
 * The DMA API is built upon the notion of "buffer ownership". A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device. These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches. We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 *
 * Private support functions: these are not part of the API and are
 * liable to change. Drivers must not use these.
 */
static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	extern void ___dma_single_cpu_to_dev(const void *, size_t,
		enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_single_cpu_to_dev(kaddr, size, dir);
}

static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	extern void ___dma_single_dev_to_cpu(const void *, size_t,
		enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_single_dev_to_cpu(kaddr, size, dir);
}

static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
		size_t, enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_page_cpu_to_dev(page, off, size, dir);
}

static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_page_dev_to_cpu(page, off, size, dir);
}
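
/*
 * A sketch of the resulting ownership transitions as a driver sees them,
 * using the public streaming API rather than the private helpers above
 * ("dev", "buf" and "len" are hypothetical):
 *
 *	handle = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);
 *		(the device now owns the buffer)
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_BIDIRECTIONAL);
 *		(the CPU owns it and may access it)
 *	dma_sync_single_for_device(dev, handle, len, DMA_BIDIRECTIONAL);
 *		(the device owns it again)
 *	dma_unmap_single(dev, handle, len, DMA_BIDIRECTIONAL);
 *		(ownership returns to the CPU)
 */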

/*
 * Return whether the given device DMA address mask can be supported
 * properly. For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 *
 * FIXME: This should really be a platform specific issue - we should
 * return false if GFP_DMA allocations may not satisfy the supplied 'mask'.
 */
static inline int dma_supported(struct device *dev, u64 mask)
{
	if (mask < ISA_DMA_THRESHOLD)
		return 0;
	return 1;
}

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
#ifdef CONFIG_DMABOUNCE
	if (dev->archdata.dmabounce) {
		if (dma_mask >= ISA_DMA_THRESHOLD)
			return 0;
		else
			return -EIO;
	}
#endif
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}
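
/*
 * A minimal usage sketch (the "pdev" pointer and the 26-bit limit are
 * hypothetical): a driver would typically negotiate its mask at probe
 * time, before creating any mappings:
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(26)))
 *		return -ENODEV;
 */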

/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == ~0;
}

/*
 * Dummy noncoherent implementation. We don't provide a dma_cache_sync
 * function so drivers using this API are highlighted with build warnings.
 */
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp)
{
	return NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t handle)
{
}

/**
 * dma_alloc_coherent - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, unbuffered memory for a device for
 * performing DMA. This function allocates pages, returns the
 * CPU-viewed address, and sets @handle to be the device-viewed
 * address.
 */
extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);

/**
 * dma_free_coherent - free memory allocated by dma_alloc_coherent
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * dma_alloc_coherent().
 *
 * References to memory and mappings associated with cpu_addr/handle
 * during and after the execution of this call are illegal.
 */
extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
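
/*
 * A minimal sketch of the pair above ("dev" and the 4KiB descriptor-ring
 * size are hypothetical):
 *
 *	void *ring;
 *	dma_addr_t ring_dma;
 *
 *	ring = dma_alloc_coherent(dev, SZ_4K, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	(program ring_dma into the device; access the ring via "ring")
 *	dma_free_coherent(dev, SZ_4K, ring, ring_dma);
 */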

/**
 * dma_mmap_coherent - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space. The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
int dma_mmap_coherent(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);
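
/*
 * A sketch of a driver mmap file operation built on dma_mmap_coherent()
 * ("my_priv" and its fields, which hold the values returned by
 * dma_alloc_coherent(), are hypothetical):
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_priv *priv = file->private_data;
 *
 *		return dma_mmap_coherent(priv->dev, vma, priv->cpu_addr,
 *					 priv->handle, priv->size);
 *	}
 */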

/**
 * dma_alloc_writecombine - allocate writecombining memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, buffered memory for a device for
 * performing DMA. This function allocates pages, returns the
 * CPU-viewed address, and sets @handle to be the device-viewed
 * address.
 */
extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
		gfp_t);

#define dma_free_writecombine(dev,size,cpu_addr,handle) \
	dma_free_coherent(dev,size,cpu_addr,handle)

int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);
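
/*
 * A sketch of the writecombine variant in use, e.g. for a framebuffer
 * where buffered writes are acceptable ("dev", "fb_size", "fb" and
 * "fb_dma" are hypothetical):
 *
 *	fb = dma_alloc_writecombine(dev, fb_size, &fb_dma, GFP_KERNEL);
 *	if (!fb)
 *		return -ENOMEM;
 *	(use the buffer, then on teardown:)
 *	dma_free_writecombine(dev, fb_size, fb, fb_dma);
 */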

#ifdef CONFIG_DMABOUNCE
/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
 * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
 *
 * The following are helper functions used by the dmabounce subsystem.
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 * @needs_bounce_fn: called to determine whether buffer needs bouncing
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing. The function will allocate
 * appropriate DMA pools for the device.
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
		unsigned long, int (*)(struct device *, dma_addr_t, size_t));

/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a
 * device that was previously registered with dmabounce_register_dev
 * is removed from the system.
 */
extern void dmabounce_unregister_dev(struct device *);

/*
 * The DMA API, implemented by dmabounce.c. See below for descriptions.
 */
extern dma_addr_t __dma_map_page(struct device *, struct page *,
		unsigned long, size_t, enum dma_data_direction);
extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
		enum dma_data_direction);

/*
 * Private functions
 */
int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
		size_t, enum dma_data_direction);
int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
		size_t, enum dma_data_direction);
#else
static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
	unsigned long offset, size_t size, enum dma_data_direction dir)
{
	return 1;
}

static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
	unsigned long offset, size_t size, enum dma_data_direction dir)
{
	return 1;
}

static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction dir)
{
	__dma_page_cpu_to_dev(page, offset, size, dir);
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
	size_t size, enum dma_data_direction dir)
{
	__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
		handle & ~PAGE_MASK, size, dir);
}
#endif /* CONFIG_DMABOUNCE */

/**
 * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @cpu_addr: CPU direct mapped address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed. The CPU
 * can regain ownership by calling dma_unmap_single() or
 * dma_sync_single_for_cpu().
 */
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
		size_t size, enum dma_data_direction dir)
{
	unsigned long offset;
	struct page *page;
	dma_addr_t addr;

	BUG_ON(!virt_addr_valid(cpu_addr));
	BUG_ON(!virt_addr_valid(cpu_addr + size - 1));
	BUG_ON(!valid_dma_direction(dir));

	page = virt_to_page(cpu_addr);
	offset = (unsigned long)cpu_addr & ~PAGE_MASK;
	addr = __dma_map_page(dev, page, offset, size, dir);
	debug_dma_map_page(dev, page, offset, size, dir, addr, true);

	return addr;
}
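
/*
 * A sketch ("dev", "MY_LEN" and the "err_free" label are hypothetical):
 * cpu_addr must be a linear-mapped kernel address, e.g. from kmalloc(),
 * since the implementation relies on virt_to_page():
 *
 *	void *buf = kmalloc(MY_LEN, GFP_KERNEL);
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, MY_LEN, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		goto err_free;
 */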

/**
 * dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed. The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	addr = __dma_map_page(dev, page, offset, size, dir);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}
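
/*
 * A sketch of mapping a sub-page region, e.g. for page-cache I/O ("dev",
 * "page" and the 512-byte offset/1KiB length are hypothetical):
 *
 *	dma_addr_t handle = dma_map_page(dev, page, 512, 1024,
 *					 DMA_FROM_DEVICE);
 *	(run the transfer, then:)
 *	dma_unmap_page(dev, handle, 1024, DMA_FROM_DEVICE);
 */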

/**
 * dma_unmap_single - unmap a single buffer previously mapped
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_single)
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Unmap a single streaming mode DMA translation. The handle and size
 * must match what was provided in the previous dma_map_single() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	debug_dma_unmap_page(dev, handle, size, dir, true);
	__dma_unmap_page(dev, handle, size, dir);
}

/**
 * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation. The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	debug_dma_unmap_page(dev, handle, size, dir, false);
	__dma_unmap_page(dev, handle, size, dir);
}

/**
 * dma_sync_single_range_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @offset: offset of region to start sync
 * @size: size of region to sync
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Make physical memory consistent for a single streaming mode DMA
 * translation after a transfer.
 *
 * If you perform a dma_map_single() but wish to interrogate the
 * buffer using the CPU, yet do not wish to tear down the DMA
 * mapping, you must call this function before doing so. At the
 * point where you give the DMA address back to the card, you must
 * first perform a dma_sync_single_range_for_device(), and then the
 * device again owns the buffer.
 */
static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t handle, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	debug_dma_sync_single_for_cpu(dev, handle + offset, size, dir);

	if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
		return;

	__dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t handle, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	debug_dma_sync_single_for_device(dev, handle + offset, size, dir);

	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
		return;

	__dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir);
}

static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	dma_sync_single_range_for_device(dev, handle, 0, size, dir);
}
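
/*
 * A partial-sync sketch: a driver polling a 4-byte status word at a
 * hypothetical STATUS_OFF inside a long-lived mapping could do:
 *
 *	dma_sync_single_range_for_cpu(dev, handle, STATUS_OFF, 4,
 *				      DMA_FROM_DEVICE);
 *	(read the status word through the CPU mapping)
 *	dma_sync_single_range_for_device(dev, handle, STATUS_OFF, 4,
 *				      DMA_FROM_DEVICE);
 */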

/*
 * The scatter list versions of the above methods.
 */
extern int dma_map_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
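
/*
 * A scatter-gather sketch ("dev", "sglist", "nents" and the descriptor
 * helper "program_hw_desc" are hypothetical; for_each_sg() comes from
 * linux/scatterlist.h):
 *
 *	struct scatterlist *sg;
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
 *	for_each_sg(sglist, sg, count, i)
 *		program_hw_desc(sg_dma_address(sg), sg_dma_len(sg));
 *	(after the transfer completes:)
 *	dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);
 */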

#endif /* __KERNEL__ */
#endif