/*
 * drivers/dma/imx-dma.c
 *
 * This file contains a driver for the Freescale i.MX DMA engine
 * found on i.MX1/21/27
 *
 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>

#include <asm/irq.h>
#include <mach/dma-v1.h>
#include <mach/hardware.h>

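/*
 * Illustrative sketch only, not part of this driver: a client typically
 * drives this dmaengine provider along the following lines. The names
 * 'filter', 'data', 'cfg', 'fifo_phys', 'sgl' and 'sg_len' below are
 * hypothetical placeholders.
 *
 *	chan = dma_request_channel(mask, filter, &data);
 *	cfg.direction = DMA_DEV_TO_MEM;
 *	cfg.src_addr = fifo_phys;
 *	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
 *	cfg.src_maxburst = 16;
 *	dmaengine_slave_config(chan, &cfg);		// imxdma_control()
 *	desc = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
 *						   DMA_DEV_TO_MEM, 0);
 *	dmaengine_submit(desc);				// imxdma_tx_submit()
 *	dma_async_issue_pending(chan);			// imxdma_issue_pending()
 */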
#define IMXDMA_MAX_CHAN_DESCRIPTORS	16

enum imxdma_prep_type {
	IMXDMA_DESC_MEMCPY,
	IMXDMA_DESC_INTERLEAVED,
	IMXDMA_DESC_SLAVE_SG,
	IMXDMA_DESC_CYCLIC,
};

struct imxdma_desc {
	struct list_head		node;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	dma_addr_t			src;
	dma_addr_t			dest;
	size_t				len;
	unsigned int			dmamode;
	enum imxdma_prep_type		type;
	/* For memcpy and interleaved */
	unsigned int			config_port;
	unsigned int			config_mem;
	/* For interleaved transfers */
	unsigned int			x;
	unsigned int			y;
	unsigned int			w;
	/* For slave sg and cyclic */
	struct scatterlist		*sg;
	unsigned int			sgcount;
};

struct imxdma_channel {
	struct imxdma_engine		*imxdma;
	unsigned int			channel;
	unsigned int			imxdma_channel;

	struct tasklet_struct		dma_tasklet;
	struct list_head		ld_free;
	struct list_head		ld_queue;
	struct list_head		ld_active;
	int				descs_allocated;
	enum dma_slave_buswidth		word_size;
	dma_addr_t			per_address;
	u32				watermark_level;
	struct dma_chan			chan;
	spinlock_t			lock;
	dma_cookie_t			last_completed;
	int				dma_request;
	struct scatterlist		*sg_list;
};

#define MAX_DMA_CHANNELS 8

struct imxdma_engine {
	struct device			*dev;
	struct device_dma_parameters	dma_parms;
	struct dma_device		dma_device;
	struct imxdma_channel		channel[MAX_DMA_CHANNELS];
};

static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct imxdma_channel, chan);
}

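/*
 * Return true if the descriptor at the head of the channel's active list is
 * a cyclic one; cyclic descriptors stay on ld_active until the channel is
 * terminated.
 */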
static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac)
{
	struct imxdma_desc *desc;

	if (!list_empty(&imxdmac->ld_active)) {
		desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc,
					node);
		if (desc->type == IMXDMA_DESC_CYCLIC)
			return true;
	}
	return false;
}

static void imxdma_irq_handler(int channel, void *data)
{
	struct imxdma_channel *imxdmac = data;

	tasklet_schedule(&imxdmac->dma_tasklet);
}

static void imxdma_err_handler(int channel, void *data, int error)
{
	struct imxdma_channel *imxdmac = data;

	tasklet_schedule(&imxdmac->dma_tasklet);
}

static void imxdma_progression(int channel, void *data,
			       struct scatterlist *sg)
{
	struct imxdma_channel *imxdmac = data;

	tasklet_schedule(&imxdmac->dma_tasklet);
}

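/*
 * Program the hardware with the given descriptor using the arch-specific
 * dma-v1 API (imx_dma_*) and start the transfer. Called with the channel
 * lock held.
 */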
static int imxdma_xfer_desc(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	int ret;

	/* Configure and enable */
	switch (d->type) {
	case IMXDMA_DESC_MEMCPY:
		ret = imx_dma_config_channel(imxdmac->imxdma_channel,
					     d->config_port, d->config_mem, 0, 0);
		if (ret < 0)
			return ret;
		ret = imx_dma_setup_single(imxdmac->imxdma_channel, d->src,
					   d->len, d->dest, d->dmamode);
		if (ret < 0)
			return ret;
		break;
	case IMXDMA_DESC_CYCLIC:
		ret = imx_dma_setup_progression_handler(imxdmac->imxdma_channel,
							imxdma_progression);
		if (ret < 0)
			return ret;
		/*
		 * Fall through: a cyclic transfer is handled like a slave_sg
		 * one, except that a progression handler and a looping sg
		 * list have been set up in 'imxdma_prep_dma_cyclic'.
		 */
	case IMXDMA_DESC_SLAVE_SG:
		if (d->dmamode == DMA_MODE_READ)
			ret = imx_dma_setup_sg(imxdmac->imxdma_channel, d->sg,
				       d->sgcount, d->len, d->src, d->dmamode);
		else
			ret = imx_dma_setup_sg(imxdmac->imxdma_channel, d->sg,
				       d->sgcount, d->len, d->dest, d->dmamode);
		if (ret < 0)
			return ret;
		break;
	default:
		return -EINVAL;
	}
	imx_dma_enable(imxdmac->imxdma_channel);
	return 0;
}

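/*
 * Completion tasklet, scheduled from the dma-v1 irq/error/progression
 * callbacks: invoke the descriptor callback, record the completed cookie,
 * recycle the descriptor (unless it is cyclic) and kick off the next queued
 * descriptor, if any.
 */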
static void imxdma_tasklet(unsigned long data)
{
	struct imxdma_channel *imxdmac = (void *)data;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	spin_lock(&imxdmac->lock);

	if (list_empty(&imxdmac->ld_active)) {
		/* Someone might have called terminate all */
		goto out;
	}
	desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);

	if (desc->desc.callback)
		desc->desc.callback(desc->desc.callback_param);

	imxdmac->last_completed = desc->desc.cookie;

	/* If we are dealing with a cyclic descriptor keep it on ld_active */
	if (imxdma_chan_is_doing_cyclic(imxdmac))
		goto out;

	list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);

	if (!list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc,
					node);
		list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
		if (imxdma_xfer_desc(desc) < 0)
			dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
				 __func__, imxdmac->channel);
	}
out:
	spin_unlock(&imxdmac->lock);
}

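/*
 * device_control hook: DMA_TERMINATE_ALL disables the channel and returns
 * all descriptors to the free list; DMA_SLAVE_CONFIG translates the generic
 * slave configuration into a dma-v1 channel and burst-length setup.
 */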
static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			  unsigned long arg)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct dma_slave_config *dmaengine_cfg = (void *)arg;
	int ret;
	unsigned long flags;
	unsigned int mode = 0;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		imx_dma_disable(imxdmac->imxdma_channel);

		spin_lock_irqsave(&imxdmac->lock, flags);
		list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
		list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
		spin_unlock_irqrestore(&imxdmac->lock, flags);
		return 0;
	case DMA_SLAVE_CONFIG:
		if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
			imxdmac->per_address = dmaengine_cfg->src_addr;
			imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
			imxdmac->word_size = dmaengine_cfg->src_addr_width;
		} else {
			imxdmac->per_address = dmaengine_cfg->dst_addr;
			imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
			imxdmac->word_size = dmaengine_cfg->dst_addr_width;
		}

		switch (imxdmac->word_size) {
		case DMA_SLAVE_BUSWIDTH_1_BYTE:
			mode = IMX_DMA_MEMSIZE_8;
			break;
		case DMA_SLAVE_BUSWIDTH_2_BYTES:
			mode = IMX_DMA_MEMSIZE_16;
			break;
		default:
		case DMA_SLAVE_BUSWIDTH_4_BYTES:
			mode = IMX_DMA_MEMSIZE_32;
			break;
		}
		ret = imx_dma_config_channel(imxdmac->imxdma_channel,
				mode | IMX_DMA_TYPE_FIFO,
				IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
				imxdmac->dma_request, 1);

		if (ret)
			return ret;

		imx_dma_config_burstlen(imxdmac->imxdma_channel,
				imxdmac->watermark_level * imxdmac->word_size);

		return 0;
	default:
		return -ENOSYS;
	}

	return -EINVAL;
}

static enum dma_status imxdma_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	dma_cookie_t last_used;
	enum dma_status ret;
	unsigned long flags;

	spin_lock_irqsave(&imxdmac->lock, flags);
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, imxdmac->last_completed, last_used);
	dma_set_tx_state(txstate, imxdmac->last_completed, last_used, 0);
	spin_unlock_irqrestore(&imxdmac->lock, flags);

	return ret;
}

static dma_cookie_t imxdma_assign_cookie(struct imxdma_channel *imxdma)
{
	dma_cookie_t cookie = imxdma->chan.cookie;

	if (++cookie < 0)
		cookie = 1;

	imxdma->chan.cookie = cookie;

	return cookie;
}

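/*
 * tx_submit hook: move the prepared descriptor (head of ld_free) to the
 * pending queue and assign it a cookie. The transfer is started later, from
 * imxdma_issue_pending() or from the completion tasklet.
 */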
static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&imxdmac->lock, flags);

	list_move_tail(imxdmac->ld_free.next, &imxdmac->ld_queue);
	cookie = imxdma_assign_cookie(imxdmac);
	tx->cookie = cookie;

	spin_unlock_irqrestore(&imxdmac->lock, flags);

	return cookie;
}

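/*
 * Allocate a fixed pool of software descriptors for the channel and put them
 * on the free list; also latch the peripheral request line passed in via
 * chan->private, if any. Returns the number of descriptors allocated, or
 * -ENOMEM if none could be allocated.
 */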
static int imxdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imx_dma_data *data = chan->private;

	if (data != NULL)
		imxdmac->dma_request = data->dma_request;

	while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) {
		struct imxdma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			break;
		memset(&desc->desc, 0, sizeof(struct dma_async_tx_descriptor));
		dma_async_tx_descriptor_init(&desc->desc, chan);
		desc->desc.tx_submit = imxdma_tx_submit;
		/* txd.flags will be overwritten in prep funcs */
		desc->desc.flags = DMA_CTRL_ACK;
		desc->status = DMA_SUCCESS;

		list_add_tail(&desc->node, &imxdmac->ld_free);
		imxdmac->descs_allocated++;
	}

	if (!imxdmac->descs_allocated)
		return -ENOMEM;

	return imxdmac->descs_allocated;
}

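/*
 * Release a channel: stop it, move every descriptor back to the free list,
 * then free the descriptors themselves and the scatterlist used for cyclic
 * transfers.
 */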
static void imxdma_free_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_desc *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdmac->lock, flags);

	imx_dma_disable(imxdmac->imxdma_channel);
	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);

	spin_unlock_irqrestore(&imxdmac->lock, flags);

	list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) {
		kfree(desc);
		imxdmac->descs_allocated--;
	}
	INIT_LIST_HEAD(&imxdmac->ld_free);

	if (imxdmac->sg_list) {
		kfree(imxdmac->sg_list);
		imxdmac->sg_list = NULL;
	}
}

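/*
 * Prepare a slave scatter/gather transfer. The descriptor at the head of
 * ld_free is reused; it is only moved to the pending queue when the client
 * submits it (imxdma_tx_submit). Returns NULL if no free descriptor is
 * available, a cyclic transfer is active, or the first sg entry violates the
 * configured bus-width alignment.
 */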
static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct scatterlist *sg;
	int i, dma_length = 0;
	struct imxdma_desc *desc;

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	for_each_sg(sgl, sg, sg_len, i) {
		dma_length += sg->length;
	}

	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		if (sgl->length & 3 || sgl->dma_address & 3)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		if (sgl->length & 1 || sgl->dma_address & 1)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		break;
	default:
		return NULL;
	}

	desc->type = IMXDMA_DESC_SLAVE_SG;
	desc->sg = sgl;
	desc->sgcount = sg_len;
	desc->len = dma_length;
	if (direction == DMA_DEV_TO_MEM) {
		desc->dmamode = DMA_MODE_READ;
		desc->src = imxdmac->per_address;
	} else {
		desc->dmamode = DMA_MODE_WRITE;
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

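/*
 * Prepare a cyclic transfer: build a scatterlist with one entry per period
 * and close it into a loop with an extra chain entry pointing back to the
 * start, so the dma-v1 layer keeps cycling over the buffer until the channel
 * is terminated.
 */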
static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	int i;
	unsigned int periods = buf_len / period_len;

	dev_dbg(imxdma->dev, "%s channel: %d buf_len=%zu period_len=%zu\n",
			__func__, imxdmac->channel, buf_len, period_len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	if (imxdmac->sg_list)
		kfree(imxdmac->sg_list);

	imxdmac->sg_list = kcalloc(periods + 1,
			sizeof(struct scatterlist), GFP_KERNEL);
	if (!imxdmac->sg_list)
		return NULL;

	sg_init_table(imxdmac->sg_list, periods);

	for (i = 0; i < periods; i++) {
		imxdmac->sg_list[i].page_link = 0;
		imxdmac->sg_list[i].offset = 0;
		imxdmac->sg_list[i].dma_address = dma_addr;
		imxdmac->sg_list[i].length = period_len;
		dma_addr += period_len;
	}

	/* close the loop */
	imxdmac->sg_list[periods].offset = 0;
	imxdmac->sg_list[periods].length = 0;
	imxdmac->sg_list[periods].page_link =
		((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;

	desc->type = IMXDMA_DESC_CYCLIC;
	desc->sg = imxdmac->sg_list;
	desc->sgcount = periods;
	desc->len = IMX_DMA_LENGTH_LOOP;
	if (direction == DMA_DEV_TO_MEM) {
		desc->dmamode = DMA_MODE_READ;
		desc->src = imxdmac->per_address;
	} else {
		desc->dmamode = DMA_MODE_WRITE;
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

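/*
 * Prepare a memory-to-memory copy: a single linear transfer using 32-bit
 * accesses on both sides (handled as IMXDMA_DESC_MEMCPY in
 * imxdma_xfer_desc()).
 */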
static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest,
	dma_addr_t src, size_t len, unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src=0x%x dst=0x%x len=%zu\n",
			__func__, imxdmac->channel, src, dest, len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_MEMCPY;
	desc->src = src;
	desc->dest = dest;
	desc->len = len;
	desc->dmamode = DMA_MODE_WRITE;
	desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

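/*
 * issue_pending hook: if the channel is idle, program the hardware with the
 * first descriptor on the pending queue and move it to the active list.
 */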
static void imxdma_issue_pending(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdmac->lock, flags);
	if (list_empty(&imxdmac->ld_active) &&
	    !list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue,
					struct imxdma_desc, node);

		if (imxdma_xfer_desc(desc) < 0) {
			dev_warn(imxdma->dev,
				 "%s: channel: %d couldn't issue DMA xfer\n",
				 __func__, imxdmac->channel);
		} else {
			list_move_tail(imxdmac->ld_queue.next,
				       &imxdmac->ld_active);
		}
	}
	spin_unlock_irqrestore(&imxdmac->lock, flags);
}

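/*
 * Probe: request all dma-v1 channels, hook up their interrupt handlers and
 * register them as a single dmaengine device providing slave, cyclic and
 * memcpy capabilities.
 */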
static int __init imxdma_probe(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma;
	int ret, i;

	imxdma = kzalloc(sizeof(*imxdma), GFP_KERNEL);
	if (!imxdma)
		return -ENOMEM;

	INIT_LIST_HEAD(&imxdma->dma_device.channels);

	dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask);

	/* Initialize channel parameters */
	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		imxdmac->imxdma_channel = imx_dma_request_by_prio("dmaengine",
							  DMA_PRIO_MEDIUM);
		if ((int)imxdmac->imxdma_channel < 0) {
			ret = -ENODEV;
			goto err_init;
		}

		imx_dma_setup_handlers(imxdmac->imxdma_channel,
		       imxdma_irq_handler, imxdma_err_handler, imxdmac);

		imxdmac->imxdma = imxdma;
		spin_lock_init(&imxdmac->lock);

		INIT_LIST_HEAD(&imxdmac->ld_queue);
		INIT_LIST_HEAD(&imxdmac->ld_free);
		INIT_LIST_HEAD(&imxdmac->ld_active);

		tasklet_init(&imxdmac->dma_tasklet, imxdma_tasklet,
			     (unsigned long)imxdmac);
		imxdmac->chan.device = &imxdma->dma_device;
		imxdmac->channel = i;

		/* Add the channel to the DMAC list */
		list_add_tail(&imxdmac->chan.device_node,
			      &imxdma->dma_device.channels);
	}

	imxdma->dev = &pdev->dev;
	imxdma->dma_device.dev = &pdev->dev;

	imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
	imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
	imxdma->dma_device.device_tx_status = imxdma_tx_status;
	imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
	imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
	imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
	imxdma->dma_device.device_control = imxdma_control;
	imxdma->dma_device.device_issue_pending = imxdma_issue_pending;

	platform_set_drvdata(pdev, imxdma);

	imxdma->dma_device.copy_align = 2; /* 2^2 = 4 bytes alignment */
	imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
	dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);

	ret = dma_async_device_register(&imxdma->dma_device);
	if (ret) {
		dev_err(&pdev->dev, "unable to register\n");
		goto err_init;
	}

	return 0;

err_init:
	while (--i >= 0) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];
		imx_dma_free(imxdmac->imxdma_channel);
	}

	kfree(imxdma);
	return ret;
}

static int __exit imxdma_remove(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma = platform_get_drvdata(pdev);
	int i;

	dma_async_device_unregister(&imxdma->dma_device);

	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		imx_dma_free(imxdmac->imxdma_channel);
	}

	kfree(imxdma);

	return 0;
}

static struct platform_driver imxdma_driver = {
	.driver		= {
		.name	= "imx-dma",
	},
	.remove		= __exit_p(imxdma_remove),
};

static int __init imxdma_module_init(void)
{
	return platform_driver_probe(&imxdma_driver, imxdma_probe);
}
subsys_initcall(imxdma_module_init);

MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX dma driver");
MODULE_LICENSE("GPL");