/*
 * Copyright (C) 2009-2010 PetaLogix
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/dma-debug.h>
#include <asm/bug.h>
#include <asm/cacheflush.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */
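
/*
 * Hypothetical illustration (not part of the original code): platform
 * setup for a bus that sees RAM at a non-zero offset could record it as
 *
 *	dev->archdata.dma_data = (void *)0x80000000UL;
 *
 * after which get_dma_direct_offset() below applies 0x80000000 to every
 * address handed to that device.
 */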

/*
 * Bring the data cache in sync with memory for a DMA transfer: flush
 * (write back) the range for DMA_TO_DEVICE, invalidate it for
 * DMA_FROM_DEVICE.
 */
static inline void __dma_sync_page(unsigned long paddr, unsigned long offset,
				   size_t size, enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		flush_dcache_range(paddr + offset, paddr + offset + size);
		break;
	case DMA_FROM_DEVICE:
		invalidate_dcache_range(paddr + offset, paddr + offset + size);
		break;
	default:
		BUG();
	}
}

static unsigned long get_dma_direct_offset(struct device *dev)
{
	if (likely(dev))
		return (unsigned long)dev->archdata.dma_data;

	return PCI_DRAM_OFFSET; /* FIXME Not sure if this is correct */
}

/*
 * The data cache is not coherent with DMA, so take the
 * consistent_alloc() path below instead of the page allocator.
 */
#define NOT_COHERENT_CACHE

static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
#ifdef NOT_COHERENT_CACHE
	return consistent_alloc(flag, size, dma_handle);
#else
	void *ret;
	struct page *page;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = virt_to_phys(ret) + get_dma_direct_offset(dev);

	return ret;
#endif
}
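
/*
 * Driver-side usage sketch (hypothetical, for illustration only): a
 * driver reaches this callback through the generic DMA API, e.g.
 *
 *	dma_addr_t handle;
 *	void *buf = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);
 *
 * On this NOT_COHERENT_CACHE configuration the buffer comes from
 * consistent_alloc() above.
 */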

static void dma_direct_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle)
{
#ifdef NOT_COHERENT_CACHE
	consistent_free(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}

static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	/* FIXME: this part of the code is untested */
	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
		__dma_sync_page(page_to_phys(sg_page(sg)), sg->offset,
				sg->length, direction);
	}

	return nents;
}
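
/*
 * Hypothetical driver-side sketch (for illustration only): after
 * building a scatterlist, a driver would call
 *
 *	int n = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *
 * which lands in dma_direct_map_sg() and flushes each segment's cache
 * lines before the device reads them.
 */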

static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs)
{
	/* nothing to do */
}

static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
	/* the direct mapping accepts any DMA mask */
	return 1;
}

static inline dma_addr_t dma_direct_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset,
					     size_t size,
					     enum dma_data_direction direction,
					     struct dma_attrs *attrs)
{
	__dma_sync_page(page_to_phys(page), offset, size, direction);
	return page_to_phys(page) + offset + get_dma_direct_offset(dev);
}
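
/*
 * Hypothetical driver-side sketch (for illustration only): a
 * single-buffer mapping such as
 *
 *	dma_addr_t h = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *
 * ends up in dma_direct_map_page(), which invalidates the cached range
 * so the CPU does not read stale lines after the device writes.
 */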

static inline void dma_direct_unmap_page(struct device *dev,
					 dma_addr_t dma_address,
					 size_t size,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
	/*
	 * No further cache cleanup is needed beyond the sync below.
	 * dma_address is already a physical address, so it can be passed
	 * straight to __dma_sync_page(), which operates on physical
	 * addresses.
	 */
	__dma_sync_page(dma_address, 0, size, direction);
}

struct dma_map_ops dma_direct_ops = {
	.alloc_coherent = dma_direct_alloc_coherent,
	.free_coherent = dma_direct_free_coherent,
	.map_sg = dma_direct_map_sg,
	.unmap_sg = dma_direct_unmap_sg,
	.dma_supported = dma_direct_dma_supported,
	.map_page = dma_direct_map_page,
	.unmap_page = dma_direct_unmap_page,
};
EXPORT_SYMBOL(dma_direct_ops);
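
/*
 * Orientation note (an assumption about the surrounding tree, not part
 * of this file): at the time of this code, asm/dma-mapping.h on
 * MicroBlaze returns &dma_direct_ops from get_dma_ops() for every
 * device; see that header for the authoritative wiring.
 */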

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(dma_init);