/*
 * Renesas SuperH DMA Engine support
 *
 * base is drivers/dma/fsldma.c
 *
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * - The SuperH DMAC has no hardware DMA chain mode.
 * - The maximum DMA transfer size is 16MB.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <cpu/dma.h>
#include <asm/dma-sh.h>
#include "shdma.h"

/* DMA descriptor control */
enum sh_dmae_desc_status {
	DESC_IDLE,
	DESC_PREPARED,
	DESC_SUBMITTED,
	DESC_COMPLETED,	/* completed, have to call callback */
	DESC_WAITING,	/* callback called, waiting for ack / re-submit */
};

#define NR_DESCS_PER_CHANNEL 32
/*
 * Define the default configuration for dual address memory-memory transfer.
 * The 0x400 value represents auto-request, external->external.
 *
 * This driver sets 4-byte burst mode; to change the mode, change the value
 * of RS_DEFAULT (e.g. for 1-byte burst mode use (RS_DUAL & ~TS_32)).
 */
#define RS_DEFAULT	(RS_DUAL)

static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);

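/* Per-channel register window; dma_base_addr[] comes from the platform headers */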
#define SH_DMAC_CHAN_BASE(id) (dma_base_addr[id])
static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
	ctrl_outl(data, SH_DMAC_CHAN_BASE(sh_dc->id) + reg);
}

static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
	return ctrl_inl(SH_DMAC_CHAN_BASE(sh_dc->id) + reg);
}

static void dmae_init(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = RS_DEFAULT;	/* default is DUAL mode */
	sh_dmae_writel(sh_chan, chcr, CHCR);
}

/*
 * Reset DMA controller
 *
 * SH7780 has two DMAOR registers
 */
static void sh_dmae_ctl_stop(int id)
{
	unsigned short dmaor = dmaor_read_reg(id);

	dmaor &= ~(DMAOR_NMIF | DMAOR_AE);
	dmaor_write_reg(id, dmaor);
}

static int sh_dmae_rst(int id)
{
	unsigned short dmaor;

	sh_dmae_ctl_stop(id);
	dmaor = dmaor_read_reg(id) | DMAOR_INIT;

	dmaor_write_reg(id, dmaor);
	if (dmaor_read_reg(id) & (DMAOR_AE | DMAOR_NMIF)) {
		pr_warning("dma-sh: Can't initialize DMAOR.\n");
		return -EINVAL;
	}
	return 0;
}

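/* DE still set while TE is clear means the channel is actively transferring */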
static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
		return true; /* working */

	return false; /* waiting */
}

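/*
 * The transfer size is encoded in two disjoint CHCR bit fields; ts_shift[]
 * (from the platform's TS_SHIFT table) maps the combined code to
 * log2(bytes per transfer unit), e.g. 5 for 32-byte units, so TCR is
 * programmed in those units (see dmae_set_reg() below).
 */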
static unsigned int ts_shift[] = TS_SHIFT;
static inline unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);
	int cnt = ((chcr & CHCR_TS_LOW_MASK) >> CHCR_TS_LOW_SHIFT) |
		((chcr & CHCR_TS_HIGH_MASK) >> CHCR_TS_HIGH_SHIFT);

	return ts_shift[cnt];
}

static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
	sh_dmae_writel(sh_chan, hw->sar, SAR);
	sh_dmae_writel(sh_chan, hw->dar, DAR);
	sh_dmae_writel(sh_chan, hw->tcr >> calc_xmit_shift(sh_chan), TCR);
}

static void dmae_start(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	chcr |= CHCR_DE | CHCR_IE;
	sh_dmae_writel(sh_chan, chcr, CHCR);
}

static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
	sh_dmae_writel(sh_chan, chcr, CHCR);
}

static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
	/* CHCR cannot be written while a DMA transfer is in progress */
	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	sh_dmae_writel(sh_chan, val, CHCR);
	return 0;
}

#define DMARS1_ADDR	0x04
#define DMARS2_ADDR	0x08
#define DMARS_SHIFT	8
#define DMARS_CHAN_MSK	0x01
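/*
 * Each 16-bit DMARS register holds the resource IDs of two channels, eight
 * bits each: even-numbered channels use the low byte, odd-numbered ones the
 * high byte (hence DMARS_SHIFT), which is why the read-modify-write below
 * masks out only the other channel's half.
 */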
static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
	u32 addr;
	int shift = 0;

	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	if (sh_chan->id & DMARS_CHAN_MSK)
		shift = DMARS_SHIFT;

	switch (sh_chan->id) {
	/* DMARS0 */
	case 0:
	case 1:
		addr = SH_DMARS_BASE;
		break;
	/* DMARS1 */
	case 2:
	case 3:
		addr = (SH_DMARS_BASE + DMARS1_ADDR);
		break;
	/* DMARS2 */
	case 4:
	case 5:
		addr = (SH_DMARS_BASE + DMARS2_ADDR);
		break;
	default:
		return -EINVAL;
	}

	ctrl_outw((val << shift) |
		  (ctrl_inw(addr) & (shift ? 0xFF00 : 0x00FF)),
		  addr);

	return 0;
}

static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
	struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
	dma_async_tx_callback callback = tx->callback;
	dma_cookie_t cookie;

	spin_lock_bh(&sh_chan->desc_lock);

	cookie = sh_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;

	sh_chan->common.cookie = cookie;
	tx->cookie = cookie;

	/* Mark all chunks of this descriptor as submitted, move to the queue */
	list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
		/*
		 * All chunks are on the global ld_free, so, we have to find
		 * the end of the chain ourselves
		 */
		if (chunk != desc && (chunk->mark == DESC_IDLE ||
				      chunk->async_tx.cookie > 0 ||
				      chunk->async_tx.cookie == -EBUSY ||
				      &chunk->node == &sh_chan->ld_free))
			break;
		chunk->mark = DESC_SUBMITTED;
		/* Callback goes to the last chunk */
		chunk->async_tx.callback = NULL;
		chunk->cookie = cookie;
		list_move_tail(&chunk->node, &sh_chan->ld_queue);
		last = chunk;
	}

	last->async_tx.callback = callback;
	last->async_tx.callback_param = tx->callback_param;

	dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n",
		tx->cookie, &last->async_tx, sh_chan->id,
		desc->hw.sar, desc->hw.tcr, desc->hw.dar);

	spin_unlock_bh(&sh_chan->desc_lock);

	return cookie;
}

/* Called with desc_lock held */
static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *desc;

	list_for_each_entry(desc, &sh_chan->ld_free, node)
		if (desc->mark != DESC_PREPARED) {
			BUG_ON(desc->mark != DESC_IDLE);
			list_del(&desc->node);
			return desc;
		}

	return NULL;
}

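/*
 * Allocate up to NR_DESCS_PER_CHANNEL descriptors for the channel; desc_lock
 * is dropped around kzalloc() because GFP_KERNEL allocations may sleep.
 * Returns the number of descriptors the channel ends up with.
 */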
static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc;

	spin_lock_bh(&sh_chan->desc_lock);
	while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
		spin_unlock_bh(&sh_chan->desc_lock);
		desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);
		if (!desc) {
			spin_lock_bh(&sh_chan->desc_lock);
			break;
		}
		dma_async_tx_descriptor_init(&desc->async_tx,
					     &sh_chan->common);
		desc->async_tx.tx_submit = sh_dmae_tx_submit;
		desc->mark = DESC_IDLE;

		spin_lock_bh(&sh_chan->desc_lock);
		list_add(&desc->node, &sh_chan->ld_free);
		sh_chan->descs_allocated++;
	}
	spin_unlock_bh(&sh_chan->desc_lock);

	return sh_chan->descs_allocated;
}

/*
 * sh_dmae_free_chan_resources - Free all resources of the channel.
 */
static void sh_dmae_free_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc, *_desc;
	LIST_HEAD(list);

	/* Prepared and not submitted descriptors can still be on the queue */
	if (!list_empty(&sh_chan->ld_queue))
		sh_dmae_chan_ld_cleanup(sh_chan, true);

	spin_lock_bh(&sh_chan->desc_lock);

	list_splice_init(&sh_chan->ld_free, &list);
	sh_chan->descs_allocated = 0;

	spin_unlock_bh(&sh_chan->desc_lock);

	list_for_each_entry_safe(desc, _desc, &list, node)
		kfree(desc);
}

/*
 * sh_dmae_add_desc - get, set up and return one transfer descriptor
 * @sh_chan:	DMA channel
 * @flags:	DMA transfer flags
 * @dest:	destination DMA address, incremented when direction equals
 *		DMA_FROM_DEVICE or DMA_BIDIRECTIONAL
 * @src:	source DMA address, incremented when direction equals
 *		DMA_TO_DEVICE or DMA_BIDIRECTIONAL
 * @len:	DMA transfer length
 * @first:	if NULL, set to the current descriptor and cookie set to -EBUSY
 * @direction:	needed for slave DMA to decide which address to keep constant,
 *		equals DMA_BIDIRECTIONAL for MEMCPY
 * Returns the prepared descriptor on success or NULL on error
 * Locks: called with desc_lock held
 */
static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
	unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
	struct sh_desc **first, enum dma_data_direction direction)
{
	struct sh_desc *new;
	size_t copy_size;

	if (!*len)
		return NULL;

	/* Allocate the link descriptor from the free list */
	new = sh_dmae_get_desc(sh_chan);
	if (!new) {
		dev_err(sh_chan->dev, "No free link descriptor available\n");
		return NULL;
	}

	copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1);

	new->hw.sar = *src;
	new->hw.dar = *dest;
	new->hw.tcr = copy_size;

	if (!*first) {
		/* First desc */
		new->async_tx.cookie = -EBUSY;
		*first = new;
	} else {
		/* Other desc - invisible to the user */
		new->async_tx.cookie = -EINVAL;
	}

	dev_dbg(sh_chan->dev, "chaining (%u/%u)@%x -> %x with %p, cookie %d\n",
		copy_size, *len, *src, *dest, &new->async_tx,
		new->async_tx.cookie);

	new->mark = DESC_PREPARED;
	new->async_tx.flags = flags;

	*len -= copy_size;
	if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE)
		*src += copy_size;
	if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE)
		*dest += copy_size;

	return new;
}

/*
 * sh_dmae_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and correct
 * list manipulation. For slave DMA, direction carries the usual meaning and,
 * logically, the SG list is RAM and the addr variable contains the slave
 * address, e.g., a FIFO I/O register. For MEMCPY, direction equals
 * DMA_BIDIRECTIONAL and the SG list contains only one element that points at
 * the source buffer. Each descriptor covers at most SH_DMA_TCR_MAX + 1 bytes
 * (the 16MB limit noted above), so longer requests become a chunk chain.
 */
static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
	enum dma_data_direction direction, unsigned long flags)
{
	struct scatterlist *sg;
	struct sh_desc *first = NULL, *new = NULL /* compiler... */;
	LIST_HEAD(tx_list);
	int chunks = 0;
	int i;

	if (!sg_len)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i)
		chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) /
			(SH_DMA_TCR_MAX + 1);

	/* Have to lock the whole loop to protect against concurrent release */
	spin_lock_bh(&sh_chan->desc_lock);

	/*
	 * Chaining:
	 * first descriptor is what user is dealing with in all API calls, its
	 * cookie is at first set to -EBUSY, at tx-submit to a positive
	 * number
	 * if more than one chunk is needed further chunks have cookie = -EINVAL
	 * the last chunk, if not equal to the first, has cookie = -ENOSPC
	 * all chunks are linked onto the tx_list head with their .node heads
	 * only during this function, then they are immediately spliced
	 * back onto the free list in form of a chain
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t sg_addr = sg_dma_address(sg);
		size_t len = sg_dma_len(sg);

		if (!len)
			goto err_get_desc;

		do {
			dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
				i, sg, len, (unsigned long long)sg_addr);

			if (direction == DMA_FROM_DEVICE)
				new = sh_dmae_add_desc(sh_chan, flags,
						&sg_addr, addr, &len, &first,
						direction);
			else
				new = sh_dmae_add_desc(sh_chan, flags,
						addr, &sg_addr, &len, &first,
						direction);
			if (!new)
				goto err_get_desc;

			new->chunks = chunks--;
			list_add_tail(&new->node, &tx_list);
		} while (len);
	}

	if (new != first)
		new->async_tx.cookie = -ENOSPC;

	/* Put them back on the free list, so, they don't get lost */
	list_splice_tail(&tx_list, &sh_chan->ld_free);

	spin_unlock_bh(&sh_chan->desc_lock);

	return &first->async_tx;

err_get_desc:
	list_for_each_entry(new, &tx_list, node)
		new->mark = DESC_IDLE;
	list_splice(&tx_list, &sh_chan->ld_free);

	spin_unlock_bh(&sh_chan->desc_lock);

	return NULL;
}

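/*
 * A descriptor prepared below is driven through the usual dmaengine client
 * flow; an illustrative sketch (not part of this driver, generic client
 * code assumed):
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, flags);
 *	cookie = tx->tx_submit(tx);		   -> sh_dmae_tx_submit()
 *	chan->device->device_issue_pending(chan);  -> sh_dmae_memcpy_issue_pending()
 */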
static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct sh_dmae_chan *sh_chan;
	struct scatterlist sg;

	if (!chan || !len)
		return NULL;

	sh_chan = to_sh_chan(chan);

	sg_init_table(&sg, 1);
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
		    offset_in_page(dma_src));
	sg_dma_address(&sg) = dma_src;
	sg_dma_len(&sg) = len;

	return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL,
			       flags);
}

static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
	struct sh_desc *desc, *_desc;
	/* Is the "exposed" head of a chain acked? */
	bool head_acked = false;
	dma_cookie_t cookie = 0;
	dma_async_tx_callback callback = NULL;
	void *param = NULL;

	spin_lock_bh(&sh_chan->desc_lock);
	list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
		struct dma_async_tx_descriptor *tx = &desc->async_tx;

		BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
		BUG_ON(desc->mark != DESC_SUBMITTED &&
		       desc->mark != DESC_COMPLETED &&
		       desc->mark != DESC_WAITING);

		/*
		 * queue is ordered, and we use this loop to (1) clean up all
		 * completed descriptors, and to (2) update descriptor flags of
		 * any chunks in a (partially) completed chain
		 */
		if (!all && desc->mark == DESC_SUBMITTED &&
		    desc->cookie != cookie)
			break;

		if (tx->cookie > 0)
			cookie = tx->cookie;

		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
			BUG_ON(sh_chan->completed_cookie != desc->cookie - 1);
			sh_chan->completed_cookie = desc->cookie;
		}

		/* Call callback on the last chunk */
		if (desc->mark == DESC_COMPLETED && tx->callback) {
			desc->mark = DESC_WAITING;
			callback = tx->callback;
			param = tx->callback_param;
			dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n",
				tx->cookie, tx, sh_chan->id);
			BUG_ON(desc->chunks != 1);
			break;
		}

		if (tx->cookie > 0 || tx->cookie == -EBUSY) {
			if (desc->mark == DESC_COMPLETED) {
				BUG_ON(tx->cookie < 0);
				desc->mark = DESC_WAITING;
			}
			head_acked = async_tx_test_ack(tx);
		} else {
			switch (desc->mark) {
			case DESC_COMPLETED:
				desc->mark = DESC_WAITING;
				/* Fall through */
			case DESC_WAITING:
				if (head_acked)
					async_tx_ack(&desc->async_tx);
			}
		}

		dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n",
			tx, tx->cookie);

		if (((desc->mark == DESC_COMPLETED ||
		      desc->mark == DESC_WAITING) &&
		     async_tx_test_ack(&desc->async_tx)) || all) {
			/* Remove from ld_queue list */
			desc->mark = DESC_IDLE;
			list_move(&desc->node, &sh_chan->ld_free);
		}
	}
	spin_unlock_bh(&sh_chan->desc_lock);

	if (callback)
		callback(param);

	return callback;
}

/*
 * sh_dmae_chan_ld_cleanup - Clean up link descriptors
 *
 * This function cleans up the ld_queue of a DMA channel; __ld_cleanup()
 * returns the callback it ran (if any), so we loop until nothing completed
 * remains on the queue.
 */
static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
	while (__ld_cleanup(sh_chan, all))
		;
}

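/*
 * Start the first still-SUBMITTED descriptor: the channel transfers one
 * chunk at a time, and the next chunk is kicked off from dmae_do_tasklet().
 */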
static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *sd;

	spin_lock_bh(&sh_chan->desc_lock);
	/* DMA work check */
	if (dmae_is_busy(sh_chan)) {
		spin_unlock_bh(&sh_chan->desc_lock);
		return;
	}

	/* Find the first un-transferred descriptor */
	list_for_each_entry(sd, &sh_chan->ld_queue, node)
		if (sd->mark == DESC_SUBMITTED) {
			/* Get the ld start address from ld_queue */
			dmae_set_reg(sh_chan, &sd->hw);
			dmae_start(sh_chan);
			break;
		}

	spin_unlock_bh(&sh_chan->desc_lock);
}

static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	sh_chan_xfer_ld_queue(sh_chan);
}

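/*
 * Run descriptor cleanup first, so that completed_cookie is up to date
 * before the completion status is computed.
 */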
static enum dma_status sh_dmae_is_complete(struct dma_chan *chan,
					dma_cookie_t cookie,
					dma_cookie_t *done,
					dma_cookie_t *used)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;

	sh_dmae_chan_ld_cleanup(sh_chan, false);

	last_used = chan->cookie;
	last_complete = sh_chan->completed_cookie;
	BUG_ON(last_complete < 0);

	if (done)
		*done = last_complete;

	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}

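/*
 * Transfer-end interrupt: halt the channel and defer completion handling
 * to dmae_do_tasklet().
 */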
static irqreturn_t sh_dmae_interrupt(int irq, void *data)
{
	irqreturn_t ret = IRQ_NONE;
	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	if (chcr & CHCR_TE) {
		/* DMA stop */
		dmae_halt(sh_chan);

		ret = IRQ_HANDLED;
		tasklet_schedule(&sh_chan->tasklet);
	}

	return ret;
}

#if defined(CONFIG_CPU_SH4)
static irqreturn_t sh_dmae_err(int irq, void *data)
{
	int err = 0;
	struct sh_dmae_device *shdev = (struct sh_dmae_device *)data;

	/* IRQ Multi */
	if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
		int __maybe_unused cnt = 0;
		switch (irq) {
#if defined(DMTE6_IRQ) && defined(DMAE1_IRQ)
		case DMTE6_IRQ:
			cnt++;
#endif
		case DMTE0_IRQ:
			if (dmaor_read_reg(cnt) & (DMAOR_NMIF | DMAOR_AE)) {
				disable_irq(irq);
				return IRQ_HANDLED;
			}
		default:
			return IRQ_NONE;
		}
	} else {
		/* reset dma controller */
		err = sh_dmae_rst(0);
		if (err)
			return IRQ_NONE; /* reset failed, report unhandled */
#ifdef SH_DMAC_BASE1
		if (shdev->pdata.mode & SHDMA_DMAOR1) {
			err = sh_dmae_rst(1);
			if (err)
				return IRQ_NONE;
		}
#endif
		disable_irq(irq);
		return IRQ_HANDLED;
	}
}
#endif

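/*
 * Completion tasklet: match the halted channel's SAR against the submitted
 * chunks to find the one that just finished, then restart the queue and run
 * descriptor cleanup.
 */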
static void dmae_do_tasklet(unsigned long data)
{
	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
	struct sh_desc *desc;
	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);

	spin_lock(&sh_chan->desc_lock);
	list_for_each_entry(desc, &sh_chan->ld_queue, node) {
		if ((desc->hw.sar + desc->hw.tcr) == sar_buf &&
		    desc->mark == DESC_SUBMITTED) {
			dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
				desc->async_tx.cookie, &desc->async_tx,
				desc->hw.dar);
			desc->mark = DESC_COMPLETED;
			break;
		}
	}
	spin_unlock(&sh_chan->desc_lock);

	/* Next desc */
	sh_chan_xfer_ld_queue(sh_chan);
	sh_dmae_chan_ld_cleanup(sh_chan, false);
}

static unsigned int get_dmae_irq(unsigned int id)
{
	unsigned int irq = 0;
	if (id < ARRAY_SIZE(dmte_irq_map))
		irq = dmte_irq_map[id];
	return irq;
}

static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
{
	int err;
	unsigned int irq = get_dmae_irq(id);
	unsigned long irqflags = IRQF_DISABLED;
	struct sh_dmae_chan *new_sh_chan;

	/* alloc channel */
	new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
	if (!new_sh_chan) {
		dev_err(shdev->common.dev,
			"No free memory for allocating dma channels!\n");
		return -ENOMEM;
	}

	new_sh_chan->dev = shdev->common.dev;
	new_sh_chan->id = id;

	/* Init DMA tasklet */
	tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet,
		     (unsigned long)new_sh_chan);

	/* Init the channel */
	dmae_init(new_sh_chan);

	spin_lock_init(&new_sh_chan->desc_lock);

	/* Init descriptor management lists */
	INIT_LIST_HEAD(&new_sh_chan->ld_queue);
	INIT_LIST_HEAD(&new_sh_chan->ld_free);

	/* copy struct dma_device */
	new_sh_chan->common.device = &shdev->common;

	/* Add the channel to DMA device channel list */
	list_add_tail(&new_sh_chan->common.device_node,
		      &shdev->common.channels);
	shdev->common.chancnt++;

	if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
		irqflags = IRQF_SHARED;
#if defined(DMTE6_IRQ)
		if (irq >= DMTE6_IRQ)
			irq = DMTE6_IRQ;
		else
#endif
			irq = DMTE0_IRQ;
	}

	snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
		 "sh-dmae%d", new_sh_chan->id);

	/* set up channel irq */
	err = request_irq(irq, &sh_dmae_interrupt, irqflags,
			  new_sh_chan->dev_id, new_sh_chan);
	if (err) {
		dev_err(shdev->common.dev, "DMA channel %d request_irq error "
			"with return %d\n", id, err);
		goto err_no_irq;
	}

	/* CHCR register control function */
	new_sh_chan->set_chcr = dmae_set_chcr;
	/* DMARS register control function */
	new_sh_chan->set_dmars = dmae_set_dmars;

	shdev->chan[id] = new_sh_chan;
	return 0;

err_no_irq:
	/* remove from dmaengine device node */
	list_del(&new_sh_chan->common.device_node);
	kfree(new_sh_chan);
	return err;
}

static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
	int i;

	for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) {
		if (shdev->chan[i]) {
			struct sh_dmae_chan *shchan = shdev->chan[i];
			if (!(shdev->pdata.mode & SHDMA_MIX_IRQ))
				free_irq(dmte_irq_map[i], shchan);

			list_del(&shchan->common.device_node);
			kfree(shchan);
			shdev->chan[i] = NULL;
		}
	}
	shdev->common.chancnt = 0;
}

static int __init sh_dmae_probe(struct platform_device *pdev)
{
	int err = 0, cnt, ecnt;
	unsigned long irqflags = IRQF_DISABLED;
#if defined(CONFIG_CPU_SH4)
	int eirq[] = { DMAE0_IRQ,
#if defined(DMAE1_IRQ)
			DMAE1_IRQ
#endif
		};
#endif
	struct sh_dmae_device *shdev;

	/* get platform data */
	if (!pdev->dev.platform_data)
		return -ENODEV;

	shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
	if (!shdev) {
		dev_err(&pdev->dev, "Not enough memory\n");
		return -ENOMEM;
	}

	/* platform data */
	memcpy(&shdev->pdata, pdev->dev.platform_data,
	       sizeof(struct sh_dmae_pdata));

	/* reset dma controller */
	err = sh_dmae_rst(0);
	if (err)
		goto rst_err;

	/* SH7780/85/23 has DMAOR1 */
	if (shdev->pdata.mode & SHDMA_DMAOR1) {
		err = sh_dmae_rst(1);
		if (err)
			goto rst_err;
	}

	INIT_LIST_HEAD(&shdev->common.channels);

	dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
	shdev->common.device_alloc_chan_resources
		= sh_dmae_alloc_chan_resources;
	shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
	shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
	shdev->common.device_is_tx_complete = sh_dmae_is_complete;
	shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;
	shdev->common.dev = &pdev->dev;
	/* Default transfer size of 32 bytes requires 32-byte alignment */
	shdev->common.copy_align = 5;

#if defined(CONFIG_CPU_SH4)
	/* Mixed IRQ mode (SH7722/SH7730 etc.): error IRQ is shared with DMTE */
	if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
		irqflags = IRQF_SHARED;
		eirq[0] = DMTE0_IRQ;
#if defined(DMTE6_IRQ) && defined(DMAE1_IRQ)
		eirq[1] = DMTE6_IRQ;
#endif
	}

	for (ecnt = 0 ; ecnt < ARRAY_SIZE(eirq); ecnt++) {
		err = request_irq(eirq[ecnt], sh_dmae_err, irqflags,
				  "DMAC Address Error", shdev);
		if (err) {
			dev_err(&pdev->dev, "DMA device request_irq error "
				"(irq %d) with return %d\n",
				eirq[ecnt], err);
			goto eirq_err;
		}
	}
#endif /* CONFIG_CPU_SH4 */

	/* Create DMA Channel */
	for (cnt = 0 ; cnt < MAX_DMA_CHANNELS ; cnt++) {
		err = sh_dmae_chan_probe(shdev, cnt);
		if (err)
			goto chan_probe_err;
	}

	platform_set_drvdata(pdev, shdev);
	dma_async_device_register(&shdev->common);

	return err;

chan_probe_err:
	sh_dmae_chan_remove(shdev);

eirq_err:
	for (ecnt-- ; ecnt >= 0; ecnt--)
		free_irq(eirq[ecnt], shdev);

rst_err:
	kfree(shdev);

	return err;
}

static int __exit sh_dmae_remove(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);

	dma_async_device_unregister(&shdev->common);

	if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
		free_irq(DMTE0_IRQ, shdev);
#if defined(DMTE6_IRQ)
		free_irq(DMTE6_IRQ, shdev);
#endif
	}

	/* channel data remove */
	sh_dmae_chan_remove(shdev);

	if (!(shdev->pdata.mode & SHDMA_MIX_IRQ)) {
		free_irq(DMAE0_IRQ, shdev);
#if defined(DMAE1_IRQ)
		free_irq(DMAE1_IRQ, shdev);
#endif
	}
	kfree(shdev);

	return 0;
}

static void sh_dmae_shutdown(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	sh_dmae_ctl_stop(0);
	if (shdev->pdata.mode & SHDMA_DMAOR1)
		sh_dmae_ctl_stop(1);
}

static struct platform_driver sh_dmae_driver = {
	.remove		= __exit_p(sh_dmae_remove),
	.shutdown	= sh_dmae_shutdown,
	.driver = {
		.name	= "sh-dma-engine",
	},
};

static int __init sh_dmae_init(void)
{
	return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
}
module_init(sh_dmae_init);

static void __exit sh_dmae_exit(void)
{
	platform_driver_unregister(&sh_dmae_driver);
}
module_exit(sh_dmae_exit);

MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
MODULE_LICENSE("GPL");