/*
 * DMA implementation for Hexagon
 *
 * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <linux/dma-mapping.h>
#include <linux/bootmem.h>
#include <linux/genalloc.h>
#include <linux/module.h>
#include <asm/dma-mapping.h>

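/* Active set of DMA operations; selected at init time by hexagon_dma_init() */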
struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

int bad_dma_address;	/* globals are automatically initialized to zero */

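/*
 * Return nonzero if the given mask can be supported.  Hexagon only
 * does 32-bit physical addressing for DMA, so exactly a full 32-bit
 * mask is accepted.
 */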
int dma_supported(struct device *dev, u64 mask)
{
	if (mask == DMA_BIT_MASK(32))
		return 1;
	else
		return 0;
}
EXPORT_SYMBOL(dma_supported);

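/* Validate the requested mask against dma_supported() and install it */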
int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
EXPORT_SYMBOL(dma_set_mask);

static struct gen_pool *coherent_pool;

/* Allocates from a pool of uncached memory that was reserved at boot time */

void *hexagon_dma_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t flag,
				 struct dma_attrs *attrs)
{
	void *ret;

	if (coherent_pool == NULL) {
		coherent_pool = gen_pool_create(PAGE_SHIFT, -1);

		if (coherent_pool == NULL)
			panic("Can't create %s() memory pool!", __func__);
		else
			gen_pool_add(coherent_pool,
				(PAGE_OFFSET + (max_low_pfn << PAGE_SHIFT)),
				hexagon_coherent_pool_size, -1);
	}

	ret = (void *) gen_pool_alloc(coherent_pool, size);

	if (ret) {
		memset(ret, 0, size);
		*dma_addr = (dma_addr_t) (ret - PAGE_OFFSET);
	} else {
		*dma_addr = ~0;
	}

	return ret;
}

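/* Return a coherent allocation to the boot-time uncached pool */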
static void hexagon_free_coherent(struct device *dev, size_t size, void *vaddr,
				  dma_addr_t dma_addr, struct dma_attrs *attrs)
{
	gen_pool_free(coherent_pool, (unsigned long) vaddr, size);
}

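/*
 * Verify that a bus (physical) address is reachable by the device's
 * DMA mask.  Returns 1 if the address is usable, 0 on overflow.
 */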
static int check_addr(const char *name, struct device *hwdev,
		      dma_addr_t bus, size_t size)
{
	if (hwdev && hwdev->dma_mask && !dma_capable(hwdev, bus, size)) {
		if (*hwdev->dma_mask >= DMA_BIT_MASK(32))
			printk(KERN_ERR
				"%s: overflow %Lx+%zu of device mask %Lx\n",
				name, (long long)bus, size,
				(long long)*hwdev->dma_mask);
		return 0;
	}
	return 1;
}

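/*
 * Map a scatter-gather list for DMA.  Hexagon DMA addresses are just
 * physical addresses, but since DMA is not cache coherent, each entry
 * must be flushed from the data cache before handoff to the device.
 */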
static int hexagon_map_sg(struct device *hwdev, struct scatterlist *sg,
			  int nents, enum dma_data_direction dir,
			  struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = sg_phys(s);
		if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
			return 0;

		s->dma_length = s->length;

		flush_dcache_range(PAGE_OFFSET + s->dma_address,
				   PAGE_OFFSET + s->dma_address + s->length);
	}

	return nents;
}

/*
 * Synchronize caches for a DMA transfer.  The address is kernel
 * virtual.  Clean (writeback) for transfers to the device, invalidate
 * for transfers from the device, and flush (clean + invalidate) for
 * bidirectional transfers.
 */
static inline void dma_sync(void *addr, size_t size,
			    enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		hexagon_clean_dcache_range((unsigned long) addr,
					   (unsigned long) addr + size);
		break;
	case DMA_FROM_DEVICE:
		hexagon_inv_dcache_range((unsigned long) addr,
					 (unsigned long) addr + size);
		break;
	case DMA_BIDIRECTIONAL:
		flush_dcache_range((unsigned long) addr,
				   (unsigned long) addr + size);
		break;
	default:
		BUG();
	}
}

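/* DMA addresses are physical on Hexagon; map one back to kernel virtual */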
static inline void *dma_addr_to_virt(dma_addr_t dma_addr)
{
	return phys_to_virt((unsigned long) dma_addr);
}

/**
 * hexagon_map_page() - maps an address for device DMA
 * @dev: pointer to DMA device
 * @page: pointer to page struct of DMA memory
 * @offset: offset within page
 * @size: size of memory to map
 * @dir: transfer direction
 * @attrs: pointer to DMA attrs (not used)
 *
 * Called to map a memory address to a DMA address prior
 * to accesses to/from device.
 *
 * There are no hoops to jump through: the DMA address is
 * simply the physical address of the page plus the offset.
 *
 * DMA is not cache coherent, so a cache sync is necessary;
 * this is a convenient place to do it.
 */
static dma_addr_t hexagon_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   struct dma_attrs *attrs)
{
	dma_addr_t bus = page_to_phys(page) + offset;

	WARN_ON(size == 0);

	if (!check_addr("map_single", dev, bus, size))
		return bad_dma_address;

	dma_sync(dma_addr_to_virt(bus), size, dir);

	return bus;
}

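/*
 * Both sync_single callbacks resolve to a cache sync on the buffer's
 * kernel virtual address; dma_sync() picks the appropriate cache
 * operation from the transfer direction.
 */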
static void hexagon_sync_single_for_cpu(struct device *dev,
					dma_addr_t dma_handle, size_t size,
					enum dma_data_direction dir)
{
	dma_sync(dma_addr_to_virt(dma_handle), size, dir);
}

static void hexagon_sync_single_for_device(struct device *dev,
					   dma_addr_t dma_handle, size_t size,
					   enum dma_data_direction dir)
{
	dma_sync(dma_addr_to_virt(dma_handle), size, dir);
}

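/* Hexagon's dma_map_ops; .is_phys indicates DMA addresses are physical */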
struct dma_map_ops hexagon_dma_ops = {
	.alloc = hexagon_dma_alloc_coherent,
	.free = hexagon_free_coherent,
	.map_sg = hexagon_map_sg,
	.map_page = hexagon_map_page,
	.sync_single_for_cpu = hexagon_sync_single_for_cpu,
	.sync_single_for_device = hexagon_sync_single_for_device,
	.is_phys = 1,
};

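/* Install the Hexagon ops as the system default, unless one is already set */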
void __init hexagon_dma_init(void)
{
	if (dma_ops)
		return;

	dma_ops = &hexagon_dma_ops;
}