/*
 * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
 * AVR32 systems.)
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 * Copyright (C) 2010-2011 ST Microelectronics
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dw_dmac_regs.h"

/*
 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
 * of which use ARM any more). See the "Databook" from Synopsys for
 * information beyond what licensees probably provide.
 *
 * The driver has currently been tested only with the Atmel AT32AP7000,
 * which does not support descriptor writeback.
 */

#define DWC_DEFAULT_CTLLO(private) ({				\
		struct dw_dma_slave *__slave = (private);	\
		int dms = __slave ? __slave->dst_master : 0;	\
		int sms = __slave ? __slave->src_master : 1;	\
		u8 smsize = __slave ? __slave->src_msize : DW_DMA_MSIZE_16; \
		u8 dmsize = __slave ? __slave->dst_msize : DW_DMA_MSIZE_16; \
								\
		(DWC_CTLL_DST_MSIZE(dmsize)			\
		 | DWC_CTLL_SRC_MSIZE(smsize)			\
		 | DWC_CTLL_LLP_D_EN				\
		 | DWC_CTLL_LLP_S_EN				\
		 | DWC_CTLL_DMS(dms)				\
		 | DWC_CTLL_SMS(sms));				\
	})
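
/*
 * Typical use, as in the prep routines further down in this file: the
 * caller ORs the transfer-specific bits on top of these defaults, e.g.
 *
 *	ctllo = DWC_DEFAULT_CTLLO(chan->private)
 *			| DWC_CTLL_DST_WIDTH(dst_width)
 *			| DWC_CTLL_SRC_WIDTH(src_width)
 *			| DWC_CTLL_DST_INC
 *			| DWC_CTLL_SRC_INC
 *			| DWC_CTLL_FC_M2M;
 */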

/*
 * This is configuration-dependent and usually a funny size like 4095.
 *
 * Note that this is a transfer count, i.e. if we transfer 32-bit
 * words, we can do 16380 bytes per descriptor.
 *
 * This parameter is also system-specific.
 */
#define DWC_MAX_COUNT	4095U
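
/*
 * In other words: with a transfer width of 32 bits (width code 2), one
 * descriptor can move at most 4095 * 4 = 16380 bytes, as noted above;
 * the prep routines below split longer requests across descriptors.
 */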

/*
 * Number of descriptors to allocate for each channel. This should be
 * made configurable somehow; preferably, the clients (at least the
 * ones using slave transfers) should be able to give us a hint.
 */
#define NR_DESCS_PER_CHANNEL	64

/*----------------------------------------------------------------------*/

/*
 * Because we're not relying on writeback from the controller (it may not
 * even be configured into the core!) we don't need to use dma_pool. These
 * descriptors -- and associated data -- are cacheable. We do need to make
 * sure their dcache entries are written back before handing them off to
 * the controller, though.
 */

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}
static struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}

static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
	return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
}

static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	struct dw_desc *ret = NULL;
	unsigned int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
		i++;
	}
	spin_unlock_irqrestore(&dwc->lock, flags);

	dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);

	return ret;
}

static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	struct dw_desc *child;

	list_for_each_entry(child, &desc->tx_list, desc_node)
		dma_sync_single_for_cpu(chan2parent(&dwc->chan),
				child->txd.phys, sizeof(child->lli),
				DMA_TO_DEVICE);
	dma_sync_single_for_cpu(chan2parent(&dwc->chan),
			desc->txd.phys, sizeof(desc->lli),
			DMA_TO_DEVICE);
}

/*
 * Move a descriptor, including any children, to the free list.
 * `desc' must not be on any lists.
 */
static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	unsigned long flags;

	if (desc) {
		struct dw_desc *child;

		dwc_sync_desc_for_cpu(dwc, desc);

		spin_lock_irqsave(&dwc->lock, flags);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&dwc->chan),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->tx_list, &dwc->free_list);
		dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &dwc->free_list);
		spin_unlock_irqrestore(&dwc->lock, flags);
	}
}

/* Called with dwc->lock held and bh disabled */
static dma_cookie_t
dwc_assign_cookie(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	dma_cookie_t cookie = dwc->chan.cookie;

	if (++cookie < 0)
		cookie = 1;

	dwc->chan.cookie = cookie;
	desc->txd.cookie = cookie;

	return cookie;
}
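
/*
 * Note: dmaengine cookies are positive integers; negative values are
 * reserved for error codes and zero is never handed out. That is why
 * the counter above wraps back to 1 on overflow instead of running
 * past INT_MAX into negative territory.
 */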

static void dwc_initialize(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_dma_slave *dws = dwc->chan.private;
	u32 cfghi = DWC_CFGH_FIFO_MODE;
	u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);

	if (dwc->initialized == true)
		return;

	if (dws) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);

		cfghi = dws->cfg_hi;
		cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
	}

	channel_writel(dwc, CFG_LO, cfglo);
	channel_writel(dwc, CFG_HI, cfghi);

	/* Enable interrupts */
	channel_set_bit(dw, MASK.XFER, dwc->mask);
	channel_set_bit(dw, MASK.ERROR, dwc->mask);

	dwc->initialized = true;
}

/*----------------------------------------------------------------------*/

/* Called with dwc->lock held and bh disabled */
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);

	/* ASSERT: channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&dwc->chan),
			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
			channel_readl(dwc, SAR),
			channel_readl(dwc, DAR),
			channel_readl(dwc, LLP),
			channel_readl(dwc, CTL_HI),
			channel_readl(dwc, CTL_LO));

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	dwc_initialize(dwc);

	channel_writel(dwc, LLP, first->txd.phys);
	channel_writel(dwc, CTL_LO,
			DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);
	channel_set_bit(dw, CH_EN, dwc->mask);
}
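
/*
 * A transfer is started by pointing LLP at the DMA address of the first
 * hardware descriptor (struct dw_lli), enabling linked-list fetches for
 * both source and destination in CTL_LO, and setting the channel-enable
 * bit. The controller then walks the lli.llp chain built by the prep
 * routines below until it reaches the descriptor whose llp is zero.
 */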

/*----------------------------------------------------------------------*/

static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
		bool callback_required)
{
	dma_async_tx_callback callback = NULL;
	void *param = NULL;
	struct dma_async_tx_descriptor *txd = &desc->txd;
	struct dw_desc *child;
	unsigned long flags;

	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);

	spin_lock_irqsave(&dwc->lock, flags);
	dwc->completed = txd->cookie;
	if (callback_required) {
		callback = txd->callback;
		param = txd->callback_param;
	}

	dwc_sync_desc_for_cpu(dwc, desc);

	/* async_tx_ack */
	list_for_each_entry(child, &desc->tx_list, desc_node)
		async_tx_ack(&child->txd);
	async_tx_ack(&desc->txd);

	list_splice_init(&desc->tx_list, &dwc->free_list);
	list_move(&desc->desc_node, &dwc->free_list);

	if (!dwc->chan.private) {
		struct device *parent = chan2parent(&dwc->chan);
		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
				dma_unmap_single(parent, desc->lli.dar,
						desc->len, DMA_FROM_DEVICE);
			else
				dma_unmap_page(parent, desc->lli.dar,
						desc->len, DMA_FROM_DEVICE);
		}
		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
				dma_unmap_single(parent, desc->lli.sar,
						desc->len, DMA_TO_DEVICE);
			else
				dma_unmap_page(parent, desc->lli.sar,
						desc->len, DMA_TO_DEVICE);
		}
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	if (callback_required && callback)
		callback(param);
}

static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: XFER bit set, but channel not idle!\n");

		/* Try to continue after resetting the channel... */
		channel_clear_bit(dw, CH_EN, dwc->mask);
		while (dma_readl(dw, CH_EN) & dwc->mask)
			cpu_relax();
	}

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	list_splice_init(&dwc->active_list, &list);
	if (!list_empty(&dwc->queue)) {
		list_move(dwc->queue.next, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, true);
}

static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	dma_addr_t llp;
	struct dw_desc *desc, *_desc;
	struct dw_desc *child;
	u32 status_xfer;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	llp = channel_readl(dwc, LLP);
	status_xfer = dma_readl(dw, RAW.XFER);

	if (status_xfer & dwc->mask) {
		/* Everything we've submitted is done */
		dma_writel(dw, CLEAR.XFER, dwc->mask);
		spin_unlock_irqrestore(&dwc->lock, flags);

		dwc_complete_all(dw, dwc);
		return;
	}

	if (list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp);

	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
		/* check first descriptors addr */
		if (desc->txd.phys == llp) {
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		/* check first descriptors llp */
		if (desc->lli.llp == llp) {
			/* This one is currently in progress */
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		list_for_each_entry(child, &desc->tx_list, desc_node)
			if (child->lli.llp == llp) {
				/* Currently in progress */
				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		spin_unlock_irqrestore(&dwc->lock, flags);
		dwc_descriptor_complete(dwc, desc, true);
		spin_lock_irqsave(&dwc->lock, flags);
	}

	dev_err(chan2dev(&dwc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	if (!list_empty(&dwc->queue)) {
		list_move(dwc->queue.next, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	}
	spin_unlock_irqrestore(&dwc->lock, flags);
}

static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
{
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
			lli->sar, lli->dar, lli->llp,
			lli->ctlhi, lli->ctllo);
}

static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *bad_desc;
	struct dw_desc *child;
	unsigned long flags;

	dwc_scan_descriptors(dw, dwc);

	spin_lock_irqsave(&dwc->lock, flags);

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = dwc_first_active(dwc);
	list_del_init(&bad_desc->desc_node);
	list_move(dwc->queue.next, dwc->active_list.prev);

	/* Clear the error flag and try to restart the controller */
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	if (!list_empty(&dwc->active_list))
		dwc_dostart(dwc, dwc_first_active(dwc));

	/*
	 * KERN_CRITICAL may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"Bad descriptor submitted for DMA!\n");
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"  cookie: %d\n", bad_desc->txd.cookie);
	dwc_dump_lli(dwc, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		dwc_dump_lli(dwc, &child->lli);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Pretend the descriptor completed successfully */
	dwc_descriptor_complete(dwc, bad_desc, true);
}

/* --------------------- Cyclic DMA API extensions -------------------- */

inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, SAR);
}
EXPORT_SYMBOL(dw_dma_get_src_addr);

inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, DAR);
}
EXPORT_SYMBOL(dw_dma_get_dst_addr);

/* called with dwc->lock held and all DMAC interrupts disabled */
static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
		u32 status_err, u32 status_xfer)
{
	unsigned long flags;

	if (dwc->mask) {
		void (*callback)(void *param);
		void *callback_param;

		dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
				channel_readl(dwc, LLP));

		callback = dwc->cdesc->period_callback;
		callback_param = dwc->cdesc->period_callback_param;

		if (callback)
			callback(callback_param);
	}

	/*
	 * Error and transfer complete are highly unlikely, and will most
	 * likely be due to a configuration error by the user.
	 */
	if (unlikely(status_err & dwc->mask) ||
			unlikely(status_xfer & dwc->mask)) {
		int i;

		dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
				"interrupt, stopping DMA transfer\n",
				status_xfer ? "xfer" : "error");

		spin_lock_irqsave(&dwc->lock, flags);

		dev_err(chan2dev(&dwc->chan),
			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
			channel_readl(dwc, SAR),
			channel_readl(dwc, DAR),
			channel_readl(dwc, LLP),
			channel_readl(dwc, CTL_HI),
			channel_readl(dwc, CTL_LO));

		channel_clear_bit(dw, CH_EN, dwc->mask);
		while (dma_readl(dw, CH_EN) & dwc->mask)
			cpu_relax();

		/* make sure DMA does not restart by loading a new list */
		channel_writel(dwc, LLP, 0);
		channel_writel(dwc, CTL_LO, 0);
		channel_writel(dwc, CTL_HI, 0);

		dma_writel(dw, CLEAR.ERROR, dwc->mask);
		dma_writel(dw, CLEAR.XFER, dwc->mask);

		for (i = 0; i < dwc->cdesc->periods; i++)
			dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);

		spin_unlock_irqrestore(&dwc->lock, flags);
	}
}

/* ------------------------------------------------------------------------- */

static void dw_dma_tasklet(unsigned long data)
{
	struct dw_dma *dw = (struct dw_dma *)data;
	struct dw_dma_chan *dwc;
	u32 status_xfer;
	u32 status_err;
	int i;

	status_xfer = dma_readl(dw, RAW.XFER);
	status_err = dma_readl(dw, RAW.ERROR);

	dev_vdbg(dw->dma.dev, "tasklet: status_err=%x\n", status_err);

	for (i = 0; i < dw->dma.chancnt; i++) {
		dwc = &dw->chan[i];
		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
			dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
		else if (status_err & (1 << i))
			dwc_handle_error(dw, dwc);
		else if (status_xfer & (1 << i))
			dwc_scan_descriptors(dw, dwc);
	}

	/*
	 * Re-enable interrupts.
	 */
	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
}

static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
{
	struct dw_dma *dw = dev_id;
	u32 status;

	dev_vdbg(dw->dma.dev, "interrupt: status=0x%x\n",
			dma_readl(dw, STATUS_INT));

	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	status = dma_readl(dw, STATUS_INT);
	if (status) {
		dev_err(dw->dma.dev,
			"BUG: Unexpected interrupts pending: 0x%x\n",
			status);

		/* Try to recover */
		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
	}

	tasklet_schedule(&dw->tasklet);

	return IRQ_HANDLED;
}

/*----------------------------------------------------------------------*/

static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dw_desc *desc = txd_to_dw_desc(tx);
	struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	cookie = dwc_assign_cookie(dwc, desc);

	/*
	 * REVISIT: We should attempt to chain as many descriptors as
	 * possible, perhaps even appending to those already submitted
	 * for DMA. But this is hard to do in a race-free manner.
	 */
	if (list_empty(&dwc->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
				desc->txd.cookie);
		list_add_tail(&desc->desc_node, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	} else {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
				desc->txd.cookie);

		list_add_tail(&desc->desc_node, &dwc->queue);
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	return cookie;
}

static struct dma_async_tx_descriptor *
dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_desc *desc;
	struct dw_desc *first;
	struct dw_desc *prev;
	size_t xfer_count;
	size_t offset;
	unsigned int src_width;
	unsigned int dst_width;
	u32 ctllo;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n",
			dest, src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!((src | dest | len) & 7))
		src_width = dst_width = 3;
	else if (!((src | dest | len) & 3))
		src_width = dst_width = 2;
	else if (!((src | dest | len) & 1))
		src_width = dst_width = 1;
	else
		src_width = dst_width = 0;

	ctllo = DWC_DEFAULT_CTLLO(chan->private)
			| DWC_CTLL_DST_WIDTH(dst_width)
			| DWC_CTLL_SRC_WIDTH(src_width)
			| DWC_CTLL_DST_INC
			| DWC_CTLL_SRC_INC
			| DWC_CTLL_FC_M2M;
	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				DWC_MAX_COUNT);

		desc = dwc_desc_get(dwc);
		if (!desc)
			goto err_desc_get;

		desc->lli.sar = src + offset;
		desc->lli.dar = dest + offset;
		desc->lli.ctllo = ctllo;
		desc->lli.ctlhi = xfer_count;

		if (!first) {
			first = desc;
		} else {
			prev->lli.llp = desc->txd.phys;
			dma_sync_single_for_device(chan2parent(chan),
					prev->txd.phys, sizeof(prev->lli),
					DMA_TO_DEVICE);
			list_add_tail(&desc->desc_node,
					&first->tx_list);
		}
		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	dma_sync_single_for_device(chan2parent(chan),
			prev->txd.phys, sizeof(prev->lli),
			DMA_TO_DEVICE);

	first->txd.flags = flags;
	first->len = len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}

static struct dma_async_tx_descriptor *
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma_slave *dws = chan->private;
	struct dw_desc *prev;
	struct dw_desc *first;
	u32 ctllo;
	dma_addr_t reg;
	unsigned int reg_width;
	unsigned int mem_width;
	unsigned int i;
	struct scatterlist *sg;
	size_t total_len = 0;

	dev_vdbg(chan2dev(chan), "prep_dma_slave\n");

	if (unlikely(!dws || !sg_len))
		return NULL;

	reg_width = dws->reg_width;
	prev = first = NULL;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		ctllo = (DWC_DEFAULT_CTLLO(chan->private)
				| DWC_CTLL_DST_WIDTH(reg_width)
				| DWC_CTLL_DST_FIX
				| DWC_CTLL_SRC_INC
				| DWC_CTLL_FC(dws->fc));
		reg = dws->tx_reg;
		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc *desc;
			u32 len, dlen, mem;

			mem = sg_phys(sg);
			len = sg_dma_len(sg);

			if (!((mem | len) & 7))
				mem_width = 3;
			else if (!((mem | len) & 3))
				mem_width = 2;
			else if (!((mem | len) & 1))
				mem_width = 1;
			else
				mem_width = 0;

slave_sg_todev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
					"not enough descriptors available\n");
				goto err_desc_get;
			}

			desc->lli.sar = mem;
			desc->lli.dar = reg;
			desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
			if ((len >> mem_width) > DWC_MAX_COUNT) {
				dlen = DWC_MAX_COUNT << mem_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}

			desc->lli.ctlhi = dlen >> mem_width;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				dma_sync_single_for_device(chan2parent(chan),
						prev->txd.phys,
						sizeof(prev->lli),
						DMA_TO_DEVICE);
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_todev_fill_desc;
		}
		break;
	case DMA_DEV_TO_MEM:
		ctllo = (DWC_DEFAULT_CTLLO(chan->private)
				| DWC_CTLL_SRC_WIDTH(reg_width)
				| DWC_CTLL_DST_INC
				| DWC_CTLL_SRC_FIX
				| DWC_CTLL_FC(dws->fc));

		reg = dws->rx_reg;
		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc *desc;
			u32 len, dlen, mem;

			mem = sg_phys(sg);
			len = sg_dma_len(sg);

			if (!((mem | len) & 7))
				mem_width = 3;
			else if (!((mem | len) & 3))
				mem_width = 2;
			else if (!((mem | len) & 1))
				mem_width = 1;
			else
				mem_width = 0;

slave_sg_fromdev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
					"not enough descriptors available\n");
				goto err_desc_get;
			}

			desc->lli.sar = reg;
			desc->lli.dar = mem;
			desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
			if ((len >> reg_width) > DWC_MAX_COUNT) {
				dlen = DWC_MAX_COUNT << reg_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}
			desc->lli.ctlhi = dlen >> reg_width;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				dma_sync_single_for_device(chan2parent(chan),
						prev->txd.phys,
						sizeof(prev->lli),
						DMA_TO_DEVICE);
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_fromdev_fill_desc;
		}
		break;
	default:
		return NULL;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	dma_sync_single_for_device(chan2parent(chan),
			prev->txd.phys, sizeof(prev->lli),
			DMA_TO_DEVICE);

	first->len = total_len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}

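/*
 * dmaengine device_control() hook for this driver: DMA_PAUSE suspends
 * the channel and waits for its FIFO to drain, DMA_RESUME clears the
 * suspend bit, and DMA_TERMINATE_ALL disables the channel and completes
 * all active and queued descriptors without invoking their callbacks.
 * Any other command returns -ENXIO.
 */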
static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		unsigned long arg)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc, *_desc;
	unsigned long flags;
	u32 cfglo;
	LIST_HEAD(list);

	if (cmd == DMA_PAUSE) {
		spin_lock_irqsave(&dwc->lock, flags);

		cfglo = channel_readl(dwc, CFG_LO);
		channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
		while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
			cpu_relax();

		dwc->paused = true;
		spin_unlock_irqrestore(&dwc->lock, flags);
	} else if (cmd == DMA_RESUME) {
		if (!dwc->paused)
			return 0;

		spin_lock_irqsave(&dwc->lock, flags);

		cfglo = channel_readl(dwc, CFG_LO);
		channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
		dwc->paused = false;

		spin_unlock_irqrestore(&dwc->lock, flags);
	} else if (cmd == DMA_TERMINATE_ALL) {
		spin_lock_irqsave(&dwc->lock, flags);

		channel_clear_bit(dw, CH_EN, dwc->mask);
		while (dma_readl(dw, CH_EN) & dwc->mask)
			cpu_relax();

		dwc->paused = false;

		/* active_list entries will end up before queued entries */
		list_splice_init(&dwc->queue, &list);
		list_splice_init(&dwc->active_list, &list);

		spin_unlock_irqrestore(&dwc->lock, flags);

		/* Flush all pending and queued descriptors */
		list_for_each_entry_safe(desc, _desc, &list, desc_node)
			dwc_descriptor_complete(dwc, desc, false);
	} else
		return -ENXIO;

	return 0;
}

static enum dma_status
dwc_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	int ret;

	last_complete = dwc->completed;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret != DMA_SUCCESS) {
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);

		last_complete = dwc->completed;
		last_used = chan->cookie;

		ret = dma_async_is_complete(cookie, last_complete, last_used);
	}

	if (ret != DMA_SUCCESS)
		dma_set_tx_state(txstate, last_complete, last_used,
				dwc_first_active(dwc)->len);
	else
		dma_set_tx_state(txstate, last_complete, last_used, 0);

	if (dwc->paused)
		return DMA_PAUSED;

	return ret;
}

static void dwc_issue_pending(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);

	if (!list_empty(&dwc->queue))
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
}

static int dwc_alloc_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc;
	int i;
	unsigned long flags;

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT: channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	dwc->completed = chan->cookie = 1;

	/*
	 * NOTE: some controllers may have additional features that we
	 * need to initialize here, like "scatter-gather" (which
	 * doesn't mean what you think it means), and status writeback.
	 */

	spin_lock_irqsave(&dwc->lock, flags);
	i = dwc->descs_allocated;
	while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
		spin_unlock_irqrestore(&dwc->lock, flags);

		desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
		if (!desc) {
			dev_info(chan2dev(chan),
				"only allocated %d descriptors\n", i);
			spin_lock_irqsave(&dwc->lock, flags);
			break;
		}

		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = dwc_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli,
				sizeof(desc->lli), DMA_TO_DEVICE);
		dwc_desc_put(dwc, desc);

		spin_lock_irqsave(&dwc->lock, flags);
		i = ++dwc->descs_allocated;
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources allocated %d descriptors\n", i);

	return i;
}

static void dwc_free_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc, *_desc;
	unsigned long flags;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
			dwc->descs_allocated);

	/* ASSERT: channel is idle */
	BUG_ON(!list_empty(&dwc->active_list));
	BUG_ON(!list_empty(&dwc->queue));
	BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);

	spin_lock_irqsave(&dwc->lock, flags);
	list_splice_init(&dwc->free_list, &list);
	dwc->descs_allocated = 0;
	dwc->initialized = false;

	/* Disable interrupts */
	channel_clear_bit(dw, MASK.XFER, dwc->mask);
	channel_clear_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		dma_unmap_single(chan2parent(chan), desc->txd.phys,
				sizeof(desc->lli), DMA_TO_DEVICE);
		kfree(desc);
	}

	dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
}

/* --------------------- Cyclic DMA API extensions -------------------- */

/**
 * dw_dma_cyclic_start - start the cyclic DMA transfer
 * @chan: the DMA channel to start
 *
 * Must be called with soft interrupts disabled. Returns zero on success or
 * -errno on failure.
 */
int dw_dma_cyclic_start(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	unsigned long flags;

	if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
		dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
		return -ENODEV;
	}

	spin_lock_irqsave(&dwc->lock, flags);

	/* assert channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&dwc->chan),
			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
			channel_readl(dwc, SAR),
			channel_readl(dwc, DAR),
			channel_readl(dwc, LLP),
			channel_readl(dwc, CTL_HI),
			channel_readl(dwc, CTL_LO));
		spin_unlock_irqrestore(&dwc->lock, flags);
		return -EBUSY;
	}

	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	/* setup DMAC channel registers */
	channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
	channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);

	channel_set_bit(dw, CH_EN, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}
EXPORT_SYMBOL(dw_dma_cyclic_start);

/**
 * dw_dma_cyclic_stop - stop the cyclic DMA transfer
 * @chan: the DMA channel to stop
 *
 * Must be called with soft interrupts disabled.
 */
void dw_dma_cyclic_stop(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);

	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	spin_unlock_irqrestore(&dwc->lock, flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_stop);

/**
 * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 *
 * Must be called before trying to start the transfer. Returns a valid struct
 * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
 */
struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
		enum dma_transfer_direction direction)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_cyclic_desc *cdesc;
	struct dw_cyclic_desc *retval = NULL;
	struct dw_desc *desc;
	struct dw_desc *last = NULL;
	struct dw_dma_slave *dws = chan->private;
	unsigned long was_cyclic;
	unsigned int reg_width;
	unsigned int periods;
	unsigned int i;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		dev_dbg(chan2dev(&dwc->chan),
				"queue and/or active list are not empty\n");
		return ERR_PTR(-EBUSY);
	}

	was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	spin_unlock_irqrestore(&dwc->lock, flags);
	if (was_cyclic) {
		dev_dbg(chan2dev(&dwc->chan),
				"channel already prepared for cyclic DMA\n");
		return ERR_PTR(-EBUSY);
	}

	retval = ERR_PTR(-EINVAL);
	reg_width = dws->reg_width;
	periods = buf_len / period_len;

	/* Check for too big/unaligned periods and unaligned DMA buffer. */
	if (period_len > (DWC_MAX_COUNT << reg_width))
		goto out_err;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(!(direction & (DMA_MEM_TO_DEV | DMA_DEV_TO_MEM))))
		goto out_err;

	retval = ERR_PTR(-ENOMEM);

	if (periods > NR_DESCS_PER_CHANNEL)
		goto out_err;

	cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
	if (!cdesc)
		goto out_err;

	cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
	if (!cdesc->desc)
		goto out_err_alloc;

	for (i = 0; i < periods; i++) {
		desc = dwc_desc_get(dwc);
		if (!desc)
			goto out_err_desc_get;

		switch (direction) {
		case DMA_MEM_TO_DEV:
			desc->lli.dar = dws->tx_reg;
			desc->lli.sar = buf_addr + (period_len * i);
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_FIX
					| DWC_CTLL_SRC_INC
					| DWC_CTLL_FC(dws->fc)
					| DWC_CTLL_INT_EN);
			break;
		case DMA_DEV_TO_MEM:
			desc->lli.dar = buf_addr + (period_len * i);
			desc->lli.sar = dws->rx_reg;
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_DST_INC
					| DWC_CTLL_SRC_FIX
					| DWC_CTLL_FC(dws->fc)
					| DWC_CTLL_INT_EN);
			break;
		default:
			break;
		}

		desc->lli.ctlhi = (period_len >> reg_width);
		cdesc->desc[i] = desc;

		if (last) {
			last->lli.llp = desc->txd.phys;
			dma_sync_single_for_device(chan2parent(chan),
					last->txd.phys, sizeof(last->lli),
					DMA_TO_DEVICE);
		}

		last = desc;
	}

	/* lets make a cyclic list */
	last->lli.llp = cdesc->desc[0]->txd.phys;
	dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
			sizeof(last->lli), DMA_TO_DEVICE);

	dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%08x len %zu "
			"period %zu periods %d\n", buf_addr, buf_len,
			period_len, periods);

	cdesc->periods = periods;
	dwc->cdesc = cdesc;

	return cdesc;

out_err_desc_get:
	while (i--)
		dwc_desc_put(dwc, cdesc->desc[i]);
out_err_alloc:
	kfree(cdesc);
out_err:
	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	return (struct dw_cyclic_desc *)retval;
}
EXPORT_SYMBOL(dw_dma_cyclic_prep);

/**
 * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
 * @chan: the DMA channel to free
 */
void dw_dma_cyclic_free(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_cyclic_desc *cdesc = dwc->cdesc;
	int i;
	unsigned long flags;

	dev_dbg(chan2dev(&dwc->chan), "cyclic free\n");

	if (!cdesc)
		return;

	spin_lock_irqsave(&dwc->lock, flags);

	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	for (i = 0; i < cdesc->periods; i++)
		dwc_desc_put(dwc, cdesc->desc[i]);

	kfree(cdesc->desc);
	kfree(cdesc);

	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_free);
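
/*
 * Rough usage sketch for the cyclic extensions above (callback and data
 * names are placeholders; the slave configuration itself comes from the
 * platform's struct dw_dma_slave passed in chan->private):
 *
 *	cdesc = dw_dma_cyclic_prep(chan, buf, buf_len, period_len,
 *				   DMA_DEV_TO_MEM);
 *	if (!IS_ERR(cdesc)) {
 *		cdesc->period_callback = my_period_callback;
 *		cdesc->period_callback_param = my_data;
 *		dw_dma_cyclic_start(chan);
 *		...
 *		dw_dma_cyclic_stop(chan);
 *		dw_dma_cyclic_free(chan);
 *	}
 */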
1331
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001332/*----------------------------------------------------------------------*/
1333
1334static void dw_dma_off(struct dw_dma *dw)
1335{
Viresh Kumar61e183f2011-11-17 16:01:29 +05301336 int i;
1337
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001338 dma_writel(dw, CFG, 0);
1339
1340 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001341 channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
1342 channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
1343 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
1344
1345 while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
1346 cpu_relax();
Viresh Kumar61e183f2011-11-17 16:01:29 +05301347
1348 for (i = 0; i < dw->dma.chancnt; i++)
1349 dw->chan[i].initialized = false;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001350}
1351
1352static int __init dw_probe(struct platform_device *pdev)
1353{
1354 struct dw_dma_platform_data *pdata;
1355 struct resource *io;
1356 struct dw_dma *dw;
1357 size_t size;
1358 int irq;
1359 int err;
1360 int i;
1361
Viresh Kumar6c618c92012-02-01 16:12:22 +05301362 pdata = dev_get_platdata(&pdev->dev);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001363 if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
1364 return -EINVAL;
1365
1366 io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1367 if (!io)
1368 return -EINVAL;
1369
1370 irq = platform_get_irq(pdev, 0);
1371 if (irq < 0)
1372 return irq;
1373
1374 size = sizeof(struct dw_dma);
1375 size += pdata->nr_channels * sizeof(struct dw_dma_chan);
1376 dw = kzalloc(size, GFP_KERNEL);
1377 if (!dw)
1378 return -ENOMEM;
1379
1380 if (!request_mem_region(io->start, DW_REGLEN, pdev->dev.driver->name)) {
1381 err = -EBUSY;
1382 goto err_kfree;
1383 }
1384
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001385 dw->regs = ioremap(io->start, DW_REGLEN);
1386 if (!dw->regs) {
1387 err = -ENOMEM;
1388 goto err_release_r;
1389 }
1390
1391 dw->clk = clk_get(&pdev->dev, "hclk");
1392 if (IS_ERR(dw->clk)) {
1393 err = PTR_ERR(dw->clk);
1394 goto err_clk;
1395 }
1396 clk_enable(dw->clk);
1397
1398	/* Force DMA off, just in case */
1399 dw_dma_off(dw);
1400
1401 err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw);
1402 if (err)
1403 goto err_irq;
1404
1405 platform_set_drvdata(pdev, dw);
1406
1407 tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
1408
1409 dw->all_chan_mask = (1 << pdata->nr_channels) - 1;
1410
1411 INIT_LIST_HEAD(&dw->dma.channels);
Barry Song463894702011-09-15 03:06:30 -07001412 for (i = 0; i < pdata->nr_channels; i++) {
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001413 struct dw_dma_chan *dwc = &dw->chan[i];
1414
1415 dwc->chan.device = &dw->dma;
1416 dwc->chan.cookie = dwc->completed = 1;
Viresh Kumarb0c31302011-03-03 15:47:21 +05301417 if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
1418 list_add_tail(&dwc->chan.device_node,
1419 &dw->dma.channels);
1420 else
1421 list_add(&dwc->chan.device_node, &dw->dma.channels);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001422
Viresh Kumar93317e82011-03-03 15:47:22 +05301423 /* 7 is highest priority & 0 is lowest. */
1424 if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
Viresh Kumare8d9f872012-02-01 16:12:21 +05301425 dwc->priority = pdata->nr_channels - i - 1;
Viresh Kumar93317e82011-03-03 15:47:22 +05301426 else
1427 dwc->priority = i;
1428
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001429 dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
1430 spin_lock_init(&dwc->lock);
1431 dwc->mask = 1 << i;
1432
1433 INIT_LIST_HEAD(&dwc->active_list);
1434 INIT_LIST_HEAD(&dwc->queue);
1435 INIT_LIST_HEAD(&dwc->free_list);
1436
1437 channel_clear_bit(dw, CH_EN, dwc->mask);
1438 }
1439
1440 /* Clear/disable all interrupts on all channels. */
1441 dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001442 dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
1443 dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
1444 dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);
1445
1446 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001447 channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
1448 channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
1449 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
1450
1451 dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
1452 dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
Jamie Iles95ea7592011-01-21 14:11:54 +00001453 if (pdata->is_private)
1454 dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001455 dw->dma.dev = &pdev->dev;
1456 dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
1457 dw->dma.device_free_chan_resources = dwc_free_chan_resources;
1458
1459 dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
1460
1461 dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
Linus Walleijc3635c72010-03-26 16:44:01 -07001462 dw->dma.device_control = dwc_control;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001463
Linus Walleij07934482010-03-26 16:50:49 -07001464 dw->dma.device_tx_status = dwc_tx_status;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001465 dw->dma.device_issue_pending = dwc_issue_pending;
1466
1467 dma_writel(dw, CFG, DW_CFG_DMA_EN);
1468
1469 printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
Barry Song463894702011-09-15 03:06:30 -07001470 dev_name(&pdev->dev), pdata->nr_channels);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001471
1472 dma_async_device_register(&dw->dma);
1473
1474 return 0;
1475
1476err_irq:
1477 clk_disable(dw->clk);
1478 clk_put(dw->clk);
1479err_clk:
1480 iounmap(dw->regs);
1481 dw->regs = NULL;
1482err_release_r:
1483 release_resource(io);
1484err_kfree:
1485 kfree(dw);
1486 return err;
1487}
1488
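/*
 * Remove: undo probe in reverse -- quiesce the controller, unregister from
 * dmaengine, free the interrupt and kill the tasklet, drop every channel
 * from the list, then release the clock, register mapping and memory.
 */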
1489static int __exit dw_remove(struct platform_device *pdev)
1490{
1491 struct dw_dma *dw = platform_get_drvdata(pdev);
1492 struct dw_dma_chan *dwc, *_dwc;
1493 struct resource *io;
1494
1495 dw_dma_off(dw);
1496 dma_async_device_unregister(&dw->dma);
1497
1498 free_irq(platform_get_irq(pdev, 0), dw);
1499 tasklet_kill(&dw->tasklet);
1500
1501 list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
1502 chan.device_node) {
1503 list_del(&dwc->chan.device_node);
1504 channel_clear_bit(dw, CH_EN, dwc->mask);
1505 }
1506
1507 clk_disable(dw->clk);
1508 clk_put(dw->clk);
1509
1510 iounmap(dw->regs);
1511 dw->regs = NULL;
1512
1513 io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1514 release_mem_region(io->start, DW_REGLEN);
1515
1516 kfree(dw);
1517
1518 return 0;
1519}
1520
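/* On shutdown, just make sure the controller is quiet and its clock is gated. */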
1521static void dw_shutdown(struct platform_device *pdev)
1522{
1523 struct dw_dma *dw = platform_get_drvdata(pdev);
1524
1525	dw_dma_off(dw);
1526 clk_disable(dw->clk);
1527}
1528
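/*
 * System sleep: the noirq suspend hook parks the controller and gates its
 * clock; resume only re-enables the clock and the global DMA enable, since
 * dw_dma_off() has already marked the channels for reinitialization.
 */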
Magnus Damm4a256b52009-07-08 13:22:18 +02001529static int dw_suspend_noirq(struct device *dev)
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001530{
Magnus Damm4a256b52009-07-08 13:22:18 +02001531 struct platform_device *pdev = to_platform_device(dev);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001532 struct dw_dma *dw = platform_get_drvdata(pdev);
1533
1534	dw_dma_off(dw);
1535 clk_disable(dw->clk);
Viresh Kumar61e183f2011-11-17 16:01:29 +05301536
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001537 return 0;
1538}
1539
Magnus Damm4a256b52009-07-08 13:22:18 +02001540static int dw_resume_noirq(struct device *dev)
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001541{
Magnus Damm4a256b52009-07-08 13:22:18 +02001542 struct platform_device *pdev = to_platform_device(dev);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001543 struct dw_dma *dw = platform_get_drvdata(pdev);
1544
1545 clk_enable(dw->clk);
1546 dma_writel(dw, CFG, DW_CFG_DMA_EN);
1547 return 0;
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001548}
1549
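/*
 * Every system-sleep transition (suspend/resume as well as hibernation
 * freeze/thaw/poweroff/restore) is handled by the same pair of noirq
 * helpers above.
 */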
Alexey Dobriyan47145212009-12-14 18:00:08 -08001550static const struct dev_pm_ops dw_dev_pm_ops = {
Magnus Damm4a256b52009-07-08 13:22:18 +02001551 .suspend_noirq = dw_suspend_noirq,
1552 .resume_noirq = dw_resume_noirq,
Rajeev KUMAR7414a1b2012-02-01 16:12:17 +05301553 .freeze_noirq = dw_suspend_noirq,
1554 .thaw_noirq = dw_resume_noirq,
1555 .restore_noirq = dw_resume_noirq,
1556 .poweroff_noirq = dw_suspend_noirq,
Magnus Damm4a256b52009-07-08 13:22:18 +02001557};
1558
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001559static struct platform_driver dw_driver = {
1560 .remove = __exit_p(dw_remove),
1561 .shutdown = dw_shutdown,
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001562 .driver = {
1563 .name = "dw_dmac",
Magnus Damm4a256b52009-07-08 13:22:18 +02001564 .pm = &dw_dev_pm_ops,
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001565 },
1566};
1567
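/*
 * The probe routine is __init and registered via platform_driver_probe(),
 * so the driver only binds to devices that exist at subsys_initcall time;
 * late or hot-plugged devices cannot be bound.
 */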
1568static int __init dw_init(void)
1569{
1570 return platform_driver_probe(&dw_driver, dw_probe);
1571}
Viresh Kumarcb689a72011-03-03 15:47:15 +05301572subsys_initcall(dw_init);
Haavard Skinnemoen3bfb1d22008-07-08 11:59:42 -07001573
1574static void __exit dw_exit(void)
1575{
1576 platform_driver_unregister(&dw_driver);
1577}
1578module_exit(dw_exit);
1579
1580MODULE_LICENSE("GPL v2");
1581MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
Jean Delvaree05503e2011-05-18 16:49:24 +02001582MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
Viresh Kumaraecb7b62011-05-24 14:04:09 +05301583MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");