1/*
2 * DMA controller driver for CSR SiRFprimaII
3 *
4 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
5 *
6 * Licensed under GPLv2 or later.
7 */
8
9#include <linux/module.h>
10#include <linux/dmaengine.h>
11#include <linux/dma-mapping.h>
12#include <linux/interrupt.h>
13#include <linux/io.h>
14#include <linux/slab.h>
15#include <linux/of_irq.h>
16#include <linux/of_address.h>
17#include <linux/of_device.h>
18#include <linux/of_platform.h>
19#include <linux/sirfsoc_dma.h>
20
21#define SIRFSOC_DMA_DESCRIPTORS 16
22#define SIRFSOC_DMA_CHANNELS 16
23
24#define SIRFSOC_DMA_CH_ADDR 0x00
25#define SIRFSOC_DMA_CH_XLEN 0x04
26#define SIRFSOC_DMA_CH_YLEN 0x08
27#define SIRFSOC_DMA_CH_CTRL 0x0C
28
29#define SIRFSOC_DMA_WIDTH_0 0x100
30#define SIRFSOC_DMA_CH_VALID 0x140
31#define SIRFSOC_DMA_CH_INT 0x144
32#define SIRFSOC_DMA_INT_EN 0x148
33#define SIRFSOC_DMA_CH_LOOP_CTRL 0x150
34
35#define SIRFSOC_DMA_MODE_CTRL_BIT 4
36#define SIRFSOC_DMA_DIR_CTRL_BIT 5
37
38/* the xlen and dma_width registers are programmed in units of 4-byte words */
39#define SIRFSOC_DMA_WORD_LEN 4
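
/*
 * Register layout note (derived from the register accesses below, for
 * reference only): each channel has a private bank of four registers at
 * base + cid * 0x10 + {CH_ADDR, CH_XLEN, CH_YLEN, CH_CTRL}, while the
 * per-channel width registers sit in a shared array at
 * base + SIRFSOC_DMA_WIDTH_0 + cid * 4.  For example, channel 3's XLEN
 * register would be at base + 3 * 0x10 + 0x04 = base + 0x34.
 */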
40
41struct sirfsoc_dma_desc {
42 struct dma_async_tx_descriptor desc;
43 struct list_head node;
44
45 /* SiRFprimaII 2D-DMA parameters */
46
47 int xlen; /* DMA xlen */
48 int ylen; /* DMA ylen */
49 int width; /* DMA width */
50 int dir;
51 bool cyclic; /* is loop DMA? */
52 u32 addr; /* DMA buffer address */
53};
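
/*
 * Worked example for the 2D parameters above (illustrative numbers only):
 * for an interleaved template with sgl[0].size = 64 bytes,
 * sgl[0].icg = 16 bytes and numf = 8 frames,
 * sirfsoc_dma_prep_interleaved() below programs
 * xlen = 64 / 4 = 16 words, width = (64 + 16) / 4 = 20 words and
 * ylen = 8 - 1 = 7; sirfsoc_dma_execute() then writes addr >> 2, i.e.
 * the word address of the DMA buffer.
 */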
54
55struct sirfsoc_dma_chan {
56 struct dma_chan chan;
57 struct list_head free;
58 struct list_head prepared;
59 struct list_head queued;
60 struct list_head active;
61 struct list_head completed;
62	unsigned long happened_cyclic;
63 unsigned long completed_cyclic;
64
65 /* Lock for this structure */
66 spinlock_t lock;
67
68 int mode;
69};
70
71struct sirfsoc_dma {
72 struct dma_device dma;
73 struct tasklet_struct tasklet;
74 struct sirfsoc_dma_chan channels[SIRFSOC_DMA_CHANNELS];
75 void __iomem *base;
76 int irq;
77};
78
79#define DRV_NAME "sirfsoc_dma"
80
81/* Convert struct dma_chan to struct sirfsoc_dma_chan */
82static inline
83struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c)
84{
85 return container_of(c, struct sirfsoc_dma_chan, chan);
86}
87
88/* Convert struct dma_chan to struct sirfsoc_dma */
89static inline struct sirfsoc_dma *dma_chan_to_sirfsoc_dma(struct dma_chan *c)
90{
91 struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(c);
92 return container_of(schan, struct sirfsoc_dma, channels[c->chan_id]);
93}
94
95/* Execute all queued DMA descriptors */
96static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan)
97{
98 struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
99 int cid = schan->chan.chan_id;
100 struct sirfsoc_dma_desc *sdesc = NULL;
101
102 /*
103	 * the lock is already held by the callers of this function, so we
104	 * don't take it again
105 */
106
107 sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc,
108 node);
109 /* Move the first queued descriptor to active list */
110	list_move_tail(&sdesc->node, &schan->active);
111
112 /* Start the DMA transfer */
113 writel_relaxed(sdesc->width, sdma->base + SIRFSOC_DMA_WIDTH_0 +
114 cid * 4);
115 writel_relaxed(cid | (schan->mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
116 (sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT),
117 sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
118 writel_relaxed(sdesc->xlen, sdma->base + cid * 0x10 +
119 SIRFSOC_DMA_CH_XLEN);
120 writel_relaxed(sdesc->ylen, sdma->base + cid * 0x10 +
121 SIRFSOC_DMA_CH_YLEN);
122 writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) |
123 (1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
124
125 /*
126	 * writel has an implicit memory write barrier to make sure data is
127 * flushed into memory before starting DMA
128 */
129 writel(sdesc->addr >> 2, sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);
130
131 if (sdesc->cyclic) {
132 writel((1 << cid) | 1 << (cid + 16) |
133 readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL),
134 sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
135 schan->happened_cyclic = schan->completed_cyclic = 0;
136 }
137}
138
139/* Interrupt handler */
140static irqreturn_t sirfsoc_dma_irq(int irq, void *data)
141{
142 struct sirfsoc_dma *sdma = data;
143 struct sirfsoc_dma_chan *schan;
144 struct sirfsoc_dma_desc *sdesc = NULL;
145 u32 is;
146 int ch;
147
148 is = readl(sdma->base + SIRFSOC_DMA_CH_INT);
149 while ((ch = fls(is) - 1) >= 0) {
150 is &= ~(1 << ch);
151 writel_relaxed(1 << ch, sdma->base + SIRFSOC_DMA_CH_INT);
152 schan = &sdma->channels[ch];
153
154 spin_lock(&schan->lock);
155
156 sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc,
157 node);
158 if (!sdesc->cyclic) {
159 /* Execute queued descriptors */
160 list_splice_tail_init(&schan->active, &schan->completed);
161 if (!list_empty(&schan->queued))
162 sirfsoc_dma_execute(schan);
163 } else
164 schan->happened_cyclic++;
165
166 spin_unlock(&schan->lock);
167 }
168
169 /* Schedule tasklet */
170 tasklet_schedule(&sdma->tasklet);
171
172 return IRQ_HANDLED;
173}
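
/*
 * The handler above services the highest-numbered pending channel first:
 * fls(is) - 1 returns the most significant set bit.  For example, if
 * SIRFSOC_DMA_CH_INT reads back 0x09 (channels 0 and 3 pending), the loop
 * handles channel 3, clears its bit, then handles channel 0.
 */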
174
175/* process completed descriptors */
176static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
177{
178 dma_cookie_t last_cookie = 0;
179 struct sirfsoc_dma_chan *schan;
180 struct sirfsoc_dma_desc *sdesc;
181 struct dma_async_tx_descriptor *desc;
182 unsigned long flags;
183 unsigned long happened_cyclic;
184 LIST_HEAD(list);
185 int i;
186
187 for (i = 0; i < sdma->dma.chancnt; i++) {
188 schan = &sdma->channels[i];
189
190 /* Get all completed descriptors */
191 spin_lock_irqsave(&schan->lock, flags);
192 if (!list_empty(&schan->completed)) {
193 list_splice_tail_init(&schan->completed, &list);
194 spin_unlock_irqrestore(&schan->lock, flags);
195
196 /* Execute callbacks and run dependencies */
197 list_for_each_entry(sdesc, &list, node) {
198 desc = &sdesc->desc;
199
200 if (desc->callback)
201 desc->callback(desc->callback_param);
202
203 last_cookie = desc->cookie;
204 dma_run_dependencies(desc);
205 }
206
207 /* Free descriptors */
208 spin_lock_irqsave(&schan->lock, flags);
209 list_splice_tail_init(&list, &schan->free);
210			schan->chan.completed_cookie = last_cookie;
211			spin_unlock_irqrestore(&schan->lock, flags);
212 } else {
213 /* for cyclic channel, desc is always in active list */
214 sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc,
215 node);
216
217			if (!sdesc || !sdesc->cyclic) {
218 /* without active cyclic DMA */
219 spin_unlock_irqrestore(&schan->lock, flags);
220 continue;
221 }
222
223 /* cyclic DMA */
224 happened_cyclic = schan->happened_cyclic;
225 spin_unlock_irqrestore(&schan->lock, flags);
226
227 desc = &sdesc->desc;
228 while (happened_cyclic != schan->completed_cyclic) {
229 if (desc->callback)
230 desc->callback(desc->callback_param);
231 schan->completed_cyclic++;
232 }
233 }
234 }
235}
236
237/* DMA Tasklet */
238static void sirfsoc_dma_tasklet(unsigned long data)
239{
240 struct sirfsoc_dma *sdma = (void *)data;
241
242 sirfsoc_dma_process_completed(sdma);
243}
244
245/* Submit descriptor to hardware */
246static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
247{
248 struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(txd->chan);
249 struct sirfsoc_dma_desc *sdesc;
250 unsigned long flags;
251 dma_cookie_t cookie;
252
253 sdesc = container_of(txd, struct sirfsoc_dma_desc, desc);
254
255 spin_lock_irqsave(&schan->lock, flags);
256
257 /* Move descriptor to queue */
258 list_move_tail(&sdesc->node, &schan->queued);
259
260 /* Update cookie */
261 cookie = schan->chan.cookie + 1;
262 if (cookie <= 0)
263 cookie = 1;
264
265 schan->chan.cookie = cookie;
266 sdesc->desc.cookie = cookie;
267
268 spin_unlock_irqrestore(&schan->lock, flags);
269
270 return cookie;
271}
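
/*
 * Cookie handling note: cookies grow monotonically per channel and wrap
 * back to 1 once the increment would go to <= 0, e.g. ..., INT_MAX, 1, 2.
 * completed_cookie (updated in sirfsoc_dma_process_completed()) always
 * trails chan.cookie; sirfsoc_dma_tx_status() compares the two.
 */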
272
273static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan,
274 struct dma_slave_config *config)
275{
276 unsigned long flags;
277
278 if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
279 (config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES))
280 return -EINVAL;
281
282 spin_lock_irqsave(&schan->lock, flags);
283 schan->mode = (config->src_maxburst == 4 ? 1 : 0);
284 spin_unlock_irqrestore(&schan->lock, flags);
285
286 return 0;
287}
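
/*
 * Illustrative client configuration (a sketch, not taken from a real user
 * of this driver): the only bus width this controller accepts is 4 bytes,
 * and a src_maxburst of 4 sets the channel's mode bit
 * (SIRFSOC_DMA_MODE_CTRL_BIT) when the transfer is started:
 *
 *	struct dma_slave_config cfg = {
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst	= 4,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 */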
288
289static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan)
290{
291 struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
292 int cid = schan->chan.chan_id;
293 unsigned long flags;
294
295 writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) &
296 ~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
297 writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);
298
299 writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
300 & ~((1 << cid) | 1 << (cid + 16)),
301 sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
302
303 spin_lock_irqsave(&schan->lock, flags);
304 list_splice_tail_init(&schan->active, &schan->free);
305 list_splice_tail_init(&schan->queued, &schan->free);
306 spin_unlock_irqrestore(&schan->lock, flags);
307
308 return 0;
309}
310
311static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
312 unsigned long arg)
313{
314 struct dma_slave_config *config;
315 struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
316
317 switch (cmd) {
318 case DMA_TERMINATE_ALL:
319 return sirfsoc_dma_terminate_all(schan);
320 case DMA_SLAVE_CONFIG:
321 config = (struct dma_slave_config *)arg;
322 return sirfsoc_dma_slave_config(schan, config);
323
324 default:
325 break;
326 }
327
328 return -ENOSYS;
329}
330
331/* Alloc channel resources */
332static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
333{
334 struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
335 struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
336 struct sirfsoc_dma_desc *sdesc;
337 unsigned long flags;
338 LIST_HEAD(descs);
339 int i;
340
341 /* Alloc descriptors for this channel */
342 for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) {
343 sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL);
344 if (!sdesc) {
345 dev_notice(sdma->dma.dev, "Memory allocation error. "
346 "Allocated only %u descriptors\n", i);
347 break;
348 }
349
350 dma_async_tx_descriptor_init(&sdesc->desc, chan);
351 sdesc->desc.flags = DMA_CTRL_ACK;
352 sdesc->desc.tx_submit = sirfsoc_dma_tx_submit;
353
354 list_add_tail(&sdesc->node, &descs);
355 }
356
357 /* Return error only if no descriptors were allocated */
358 if (i == 0)
359 return -ENOMEM;
360
361 spin_lock_irqsave(&schan->lock, flags);
362
363 list_splice_tail_init(&descs, &schan->free);
364 spin_unlock_irqrestore(&schan->lock, flags);
365
366 return i;
367}
368
369/* Free channel resources */
370static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
371{
372 struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
373 struct sirfsoc_dma_desc *sdesc, *tmp;
374 unsigned long flags;
375 LIST_HEAD(descs);
376
377 spin_lock_irqsave(&schan->lock, flags);
378
379 /* Channel must be idle */
380 BUG_ON(!list_empty(&schan->prepared));
381 BUG_ON(!list_empty(&schan->queued));
382 BUG_ON(!list_empty(&schan->active));
383 BUG_ON(!list_empty(&schan->completed));
384
385 /* Move data */
386 list_splice_tail_init(&schan->free, &descs);
387
388 spin_unlock_irqrestore(&schan->lock, flags);
389
390 /* Free descriptors */
391 list_for_each_entry_safe(sdesc, tmp, &descs, node)
392 kfree(sdesc);
393}
394
395/* Send pending descriptor to hardware */
396static void sirfsoc_dma_issue_pending(struct dma_chan *chan)
397{
398 struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
399 unsigned long flags;
400
401 spin_lock_irqsave(&schan->lock, flags);
402
403 if (list_empty(&schan->active) && !list_empty(&schan->queued))
404 sirfsoc_dma_execute(schan);
405
406 spin_unlock_irqrestore(&schan->lock, flags);
407}
408
409/* Check request completion status */
410static enum dma_status
411sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
412 struct dma_tx_state *txstate)
413{
414 struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
415 unsigned long flags;
416 dma_cookie_t last_used;
417 dma_cookie_t last_complete;
418
419 spin_lock_irqsave(&schan->lock, flags);
420 last_used = schan->chan.cookie;
421	last_complete = schan->chan.completed_cookie;
422	spin_unlock_irqrestore(&schan->lock, flags);
423
424 dma_set_tx_state(txstate, last_complete, last_used, 0);
425 return dma_async_is_complete(cookie, last_complete, last_used);
426}
427
428static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved(
429 struct dma_chan *chan, struct dma_interleaved_template *xt,
430 unsigned long flags)
431{
432 struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
433 struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
434 struct sirfsoc_dma_desc *sdesc = NULL;
435 unsigned long iflags;
436 int ret;
437
438	if ((xt->dir != DMA_MEM_TO_DEV) && (xt->dir != DMA_DEV_TO_MEM)) {
439 ret = -EINVAL;
440 goto err_dir;
441 }
442
443 /* Get free descriptor */
444 spin_lock_irqsave(&schan->lock, iflags);
445 if (!list_empty(&schan->free)) {
446 sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
447 node);
448 list_del(&sdesc->node);
449 }
450 spin_unlock_irqrestore(&schan->lock, iflags);
451
452 if (!sdesc) {
453 /* try to free completed descriptors */
454 sirfsoc_dma_process_completed(sdma);
455 ret = 0;
456 goto no_desc;
457 }
458
459 /* Place descriptor in prepared list */
460 spin_lock_irqsave(&schan->lock, iflags);
461
462 /*
463 * Number of chunks in a frame can only be 1 for prima2
464	 * and ylen (number of frames - 1) must be at least 0
465 */
466 if ((xt->frame_size == 1) && (xt->numf > 0)) {
467 sdesc->cyclic = 0;
468 sdesc->xlen = xt->sgl[0].size / SIRFSOC_DMA_WORD_LEN;
469 sdesc->width = (xt->sgl[0].size + xt->sgl[0].icg) /
470 SIRFSOC_DMA_WORD_LEN;
471 sdesc->ylen = xt->numf - 1;
472 if (xt->dir == DMA_MEM_TO_DEV) {
473 sdesc->addr = xt->src_start;
474 sdesc->dir = 1;
475 } else {
476 sdesc->addr = xt->dst_start;
477 sdesc->dir = 0;
478 }
479
480 list_add_tail(&sdesc->node, &schan->prepared);
481 } else {
482 pr_err("sirfsoc DMA Invalid xfer\n");
483 ret = -EINVAL;
484 goto err_xfer;
485 }
486 spin_unlock_irqrestore(&schan->lock, iflags);
487
488 return &sdesc->desc;
489err_xfer:
490 spin_unlock_irqrestore(&schan->lock, iflags);
491no_desc:
492err_dir:
493 return ERR_PTR(ret);
494}
495
496static struct dma_async_tx_descriptor *
497sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
498 size_t buf_len, size_t period_len,
499 enum dma_transfer_direction direction)
500{
501 struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
502 struct sirfsoc_dma_desc *sdesc = NULL;
503 unsigned long iflags;
504
505 /*
506	 * We only support cyclic transfers with two periods.
507 * If the X-length is set to 0, it would be the loop mode.
508 * The DMA address keeps increasing until reaching the end of a loop
509 * area whose size is defined by (DMA_WIDTH x (Y_LENGTH + 1)). Then
510 * the DMA address goes back to the beginning of this area.
511 * In loop mode, the DMA data region is divided into two parts, BUFA
512 * and BUFB. DMA controller generates interrupts twice in each loop:
513 * when the DMA address reaches the end of BUFA or the end of the
514 * BUFB
515 */
516 if (buf_len != 2 * period_len)
517 return ERR_PTR(-EINVAL);
518
519 /* Get free descriptor */
520 spin_lock_irqsave(&schan->lock, iflags);
521 if (!list_empty(&schan->free)) {
522 sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
523 node);
524 list_del(&sdesc->node);
525 }
526 spin_unlock_irqrestore(&schan->lock, iflags);
527
528 if (!sdesc)
529		return NULL;
530
531 /* Place descriptor in prepared list */
532 spin_lock_irqsave(&schan->lock, iflags);
533 sdesc->addr = addr;
534 sdesc->cyclic = 1;
535 sdesc->xlen = 0;
536 sdesc->ylen = buf_len / SIRFSOC_DMA_WORD_LEN - 1;
537 sdesc->width = 1;
538 list_add_tail(&sdesc->node, &schan->prepared);
539 spin_unlock_irqrestore(&schan->lock, iflags);
540
541 return &sdesc->desc;
542}
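
/*
 * Example cyclic setup (illustrative numbers): an 8 KiB ring split into
 * two 4 KiB periods satisfies the buf_len == 2 * period_len check above
 * and is programmed as xlen = 0 (loop mode), width = 1 and
 * ylen = 8192 / 4 - 1 = 2047; the controller then interrupts at the end
 * of each half (BUFA/BUFB) and sirfsoc_dma_process_completed() runs the
 * callback once per completed half.
 */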
543
544/*
545 * The DMA controller consists of 16 independent DMA channels.
546 * Each channel is allocated to a different function
547 */
548bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
549{
550 unsigned int ch_nr = (unsigned int) chan_id;
551
552 if (ch_nr == chan->chan_id +
553 chan->device->dev_id * SIRFSOC_DMA_CHANNELS)
554 return true;
555
556 return false;
557}
558EXPORT_SYMBOL(sirfsoc_dma_filter_id);
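
/*
 * Typical usage from a client driver (a sketch; the channel number is
 * board specific): request channel <n> of DMAC <dev_id> by passing
 * dev_id * SIRFSOC_DMA_CHANNELS + n as the filter parameter, e.g.
 * (void *)12 below picks channel 12 of DMAC 0:
 *
 *	struct dma_chan *chan;
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, sirfsoc_dma_filter_id, (void *)12);
 */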
559
560static int __devinit sirfsoc_dma_probe(struct platform_device *op)
561{
562 struct device_node *dn = op->dev.of_node;
563 struct device *dev = &op->dev;
564 struct dma_device *dma;
565 struct sirfsoc_dma *sdma;
566 struct sirfsoc_dma_chan *schan;
567 struct resource res;
568 ulong regs_start, regs_size;
569 u32 id;
570 int ret, i;
571
572 sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL);
573 if (!sdma) {
574 dev_err(dev, "Memory exhausted!\n");
575 return -ENOMEM;
576 }
577
578 if (of_property_read_u32(dn, "cell-index", &id)) {
579		dev_err(dev, "Failed to get DMAC index\n");
580 ret = -ENODEV;
581 goto free_mem;
582 }
583
584 sdma->irq = irq_of_parse_and_map(dn, 0);
585 if (sdma->irq == NO_IRQ) {
586 dev_err(dev, "Error mapping IRQ!\n");
587 ret = -EINVAL;
588 goto free_mem;
589 }
590
591 ret = of_address_to_resource(dn, 0, &res);
592 if (ret) {
593 dev_err(dev, "Error parsing memory region!\n");
594 goto free_mem;
595 }
596
597 regs_start = res.start;
598 regs_size = resource_size(&res);
599
600 sdma->base = devm_ioremap(dev, regs_start, regs_size);
601 if (!sdma->base) {
602 dev_err(dev, "Error mapping memory region!\n");
603 ret = -ENOMEM;
604 goto irq_dispose;
605 }
606
607 ret = devm_request_irq(dev, sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME,
608 sdma);
609 if (ret) {
610 dev_err(dev, "Error requesting IRQ!\n");
611 ret = -EINVAL;
612 goto unmap_mem;
613 }
614
615 dma = &sdma->dma;
616 dma->dev = dev;
617 dma->chancnt = SIRFSOC_DMA_CHANNELS;
618
619 dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
620 dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
621 dma->device_issue_pending = sirfsoc_dma_issue_pending;
622 dma->device_control = sirfsoc_dma_control;
623 dma->device_tx_status = sirfsoc_dma_tx_status;
624 dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved;
625 dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic;
626
627 INIT_LIST_HEAD(&dma->channels);
628 dma_cap_set(DMA_SLAVE, dma->cap_mask);
629 dma_cap_set(DMA_CYCLIC, dma->cap_mask);
630 dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
631 dma_cap_set(DMA_PRIVATE, dma->cap_mask);
632
633 for (i = 0; i < dma->chancnt; i++) {
634 schan = &sdma->channels[i];
635
636 schan->chan.device = dma;
637 schan->chan.cookie = 1;
638		schan->chan.completed_cookie = schan->chan.cookie;
639
640 INIT_LIST_HEAD(&schan->free);
641 INIT_LIST_HEAD(&schan->prepared);
642 INIT_LIST_HEAD(&schan->queued);
643 INIT_LIST_HEAD(&schan->active);
644 INIT_LIST_HEAD(&schan->completed);
645
646 spin_lock_init(&schan->lock);
647 list_add_tail(&schan->chan.device_node, &dma->channels);
648 }
649
650 tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);
651
652 /* Register DMA engine */
653 dev_set_drvdata(dev, sdma);
654 ret = dma_async_device_register(dma);
655 if (ret)
656 goto free_irq;
657
658 dev_info(dev, "initialized SIRFSOC DMAC driver\n");
659
660 return 0;
661
662free_irq:
663 devm_free_irq(dev, sdma->irq, sdma);
664irq_dispose:
665 irq_dispose_mapping(sdma->irq);
666unmap_mem:
667 iounmap(sdma->base);
668free_mem:
669 devm_kfree(dev, sdma);
670 return ret;
671}
672
673static int __devexit sirfsoc_dma_remove(struct platform_device *op)
674{
675 struct device *dev = &op->dev;
676 struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
677
678 dma_async_device_unregister(&sdma->dma);
679 devm_free_irq(dev, sdma->irq, sdma);
680 irq_dispose_mapping(sdma->irq);
681 iounmap(sdma->base);
682 devm_kfree(dev, sdma);
683 return 0;
684}
685
686static struct of_device_id sirfsoc_dma_match[] = {
687 { .compatible = "sirf,prima2-dmac", },
688 {},
689};
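
/*
 * The probe routine above expects a device tree node along these lines
 * (values are illustrative; the real addresses, interrupt number and
 * cell-index come from the board/SoC dts):
 *
 *	dmac0: dma-controller@b00b0000 {
 *		compatible = "sirf,prima2-dmac";
 *		reg = <0xb00b0000 0x10000>;
 *		interrupts = <12>;
 *		cell-index = <0>;
 *	};
 */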
690
691static struct platform_driver sirfsoc_dma_driver = {
692 .probe = sirfsoc_dma_probe,
693 .remove = __devexit_p(sirfsoc_dma_remove),
694 .driver = {
695 .name = DRV_NAME,
696 .owner = THIS_MODULE,
697 .of_match_table = sirfsoc_dma_match,
698 },
699};
700
701module_platform_driver(sirfsoc_dma_driver);
702
703MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, "
704 "Barry Song <baohua.song@csr.com>");
705MODULE_DESCRIPTION("SIRFSOC DMA control driver");
706MODULE_LICENSE("GPL v2");