/*
 * Renesas R-Car Gen2 DMA Controller Driver
 *
 * Copyright (C) 2014 Renesas Electronics Inc.
 *
 * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 */
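
/*
 * A client driver uses this controller through the standard dmaengine slave
 * API. A minimal sketch of the expected call sequence is shown below; the
 * names fifo_phys_addr, buf_dma, len and done_fn are hypothetical and error
 * handling is omitted:
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "rx");
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_DEV_TO_MEM,
 *		.src_addr = fifo_phys_addr,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *	};
 *	struct dma_async_tx_descriptor *tx;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	tx = dmaengine_prep_slave_single(chan, buf_dma, len, DMA_DEV_TO_MEM,
 *					 DMA_PREP_INTERRUPT);
 *	tx->callback = done_fn;
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */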
12
Laurent Pinchartccadee92014-07-16 23:15:48 +020013#include <linux/dma-mapping.h>
Laurent Pinchart87244fe2014-07-09 00:42:19 +020014#include <linux/dmaengine.h>
15#include <linux/interrupt.h>
16#include <linux/list.h>
17#include <linux/module.h>
18#include <linux/mutex.h>
19#include <linux/of.h>
20#include <linux/of_dma.h>
21#include <linux/of_platform.h>
22#include <linux/platform_device.h>
23#include <linux/pm_runtime.h>
24#include <linux/slab.h>
25#include <linux/spinlock.h>
26
27#include "../dmaengine.h"
28
29/*
30 * struct rcar_dmac_xfer_chunk - Descriptor for a hardware transfer
31 * @node: entry in the parent's chunks list
32 * @src_addr: device source address
33 * @dst_addr: device destination address
34 * @size: transfer size in bytes
35 */
36struct rcar_dmac_xfer_chunk {
37 struct list_head node;
38
39 dma_addr_t src_addr;
40 dma_addr_t dst_addr;
41 u32 size;
42};
43
44/*
Laurent Pinchartccadee92014-07-16 23:15:48 +020045 * struct rcar_dmac_hw_desc - Hardware descriptor for a transfer chunk
46 * @sar: value of the SAR register (source address)
47 * @dar: value of the DAR register (destination address)
48 * @tcr: value of the TCR register (transfer count)
49 */
50struct rcar_dmac_hw_desc {
51 u32 sar;
52 u32 dar;
53 u32 tcr;
54 u32 reserved;
55} __attribute__((__packed__));
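
/*
 * Each hardware descriptor is padded to 16 bytes by the reserved field. This
 * matches the 16-byte alignment of the descriptor memory implied by
 * RCAR_DMADPBASE, whose low four bits are not part of the base address.
 */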
56
57/*
Laurent Pinchart87244fe2014-07-09 00:42:19 +020058 * struct rcar_dmac_desc - R-Car Gen2 DMA Transfer Descriptor
59 * @async_tx: base DMA asynchronous transaction descriptor
60 * @direction: direction of the DMA transfer
61 * @xfer_shift: log2 of the transfer size
62 * @chcr: value of the channel configuration register for this transfer
63 * @node: entry in the channel's descriptors lists
64 * @chunks: list of transfer chunks for this transfer
65 * @running: the transfer chunk being currently processed
Laurent Pinchartccadee92014-07-16 23:15:48 +020066 * @nchunks: number of transfer chunks for this transfer
Laurent Pinchart1ed13152014-07-19 00:05:14 +020067 * @hwdescs.use: whether the transfer descriptor uses hardware descriptors
Laurent Pinchartccadee92014-07-16 23:15:48 +020068 * @hwdescs.mem: hardware descriptors memory for the transfer
69 * @hwdescs.dma: device address of the hardware descriptors memory
70 * @hwdescs.size: size of the hardware descriptors in bytes
Laurent Pinchart87244fe2014-07-09 00:42:19 +020071 * @size: transfer size in bytes
72 * @cyclic: when set indicates that the DMA transfer is cyclic
73 */
74struct rcar_dmac_desc {
75 struct dma_async_tx_descriptor async_tx;
76 enum dma_transfer_direction direction;
77 unsigned int xfer_shift;
78 u32 chcr;
79
80 struct list_head node;
81 struct list_head chunks;
82 struct rcar_dmac_xfer_chunk *running;
Laurent Pinchartccadee92014-07-16 23:15:48 +020083 unsigned int nchunks;
84
85 struct {
Laurent Pinchart1ed13152014-07-19 00:05:14 +020086 bool use;
Laurent Pinchartccadee92014-07-16 23:15:48 +020087 struct rcar_dmac_hw_desc *mem;
88 dma_addr_t dma;
89 size_t size;
90 } hwdescs;
Laurent Pinchart87244fe2014-07-09 00:42:19 +020091
92 unsigned int size;
93 bool cyclic;
94};
95
96#define to_rcar_dmac_desc(d) container_of(d, struct rcar_dmac_desc, async_tx)
97
98/*
99 * struct rcar_dmac_desc_page - One page worth of descriptors
100 * @node: entry in the channel's pages list
101 * @descs: array of DMA descriptors
102 * @chunks: array of transfer chunk descriptors
103 */
104struct rcar_dmac_desc_page {
105 struct list_head node;
106
107 union {
108 struct rcar_dmac_desc descs[0];
109 struct rcar_dmac_xfer_chunk chunks[0];
110 };
111};
112
113#define RCAR_DMAC_DESCS_PER_PAGE \
114 ((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, descs)) / \
115 sizeof(struct rcar_dmac_desc))
116#define RCAR_DMAC_XFER_CHUNKS_PER_PAGE \
117 ((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, chunks)) / \
118 sizeof(struct rcar_dmac_xfer_chunk))
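
/*
 * Descriptors and transfer chunks are allocated one page at a time; the union
 * in struct rcar_dmac_desc_page lets a page back either object type, and the
 * two macros above compute how many objects of each type fit in a page.
 */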
119
120/*
Niklas Söderlundc5ed08e2016-08-10 13:22:18 +0200121 * struct rcar_dmac_chan_slave - Slave configuration
122 * @slave_addr: slave memory address
123 * @xfer_size: size (in bytes) of hardware transfers
124 */
125struct rcar_dmac_chan_slave {
126 phys_addr_t slave_addr;
127 unsigned int xfer_size;
128};
129
130/*
Niklas Söderlund9f878602016-08-10 13:22:19 +0200131 * struct rcar_dmac_chan_map - Map of slave device phys to dma address
132 * @addr: slave dma address
133 * @dir: direction of mapping
134 * @slave: slave configuration that is mapped
135 */
136struct rcar_dmac_chan_map {
137 dma_addr_t addr;
138 enum dma_data_direction dir;
139 struct rcar_dmac_chan_slave slave;
140};
141
142/*
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200143 * struct rcar_dmac_chan - R-Car Gen2 DMA Controller Channel
144 * @chan: base DMA channel object
145 * @iomem: channel I/O memory base
146 * @index: index of this channel in the controller
Niklas Söderlund427d5ec2017-05-16 01:09:15 +0200147 * @irq: channel IRQ
Niklas Söderlundc5ed08e2016-08-10 13:22:18 +0200148 * @src: slave memory address and size on the source side
149 * @dst: slave memory address and size on the destination side
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200150 * @mid_rid: hardware MID/RID for the DMA client using this channel
151 * @lock: protects the channel CHCR register and the desc members
152 * @desc.free: list of free descriptors
153 * @desc.pending: list of pending descriptors (submitted with tx_submit)
154 * @desc.active: list of active descriptors (activated with issue_pending)
155 * @desc.done: list of completed descriptors
156 * @desc.wait: list of descriptors waiting for an ack
157 * @desc.running: the descriptor being processed (a member of the active list)
158 * @desc.chunks_free: list of free transfer chunk descriptors
159 * @desc.pages: list of pages used by allocated descriptors
160 */
161struct rcar_dmac_chan {
162 struct dma_chan chan;
163 void __iomem *iomem;
164 unsigned int index;
Niklas Söderlund427d5ec2017-05-16 01:09:15 +0200165 int irq;
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200166
Niklas Söderlundc5ed08e2016-08-10 13:22:18 +0200167 struct rcar_dmac_chan_slave src;
168 struct rcar_dmac_chan_slave dst;
Niklas Söderlund9f878602016-08-10 13:22:19 +0200169 struct rcar_dmac_chan_map map;
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200170 int mid_rid;
171
172 spinlock_t lock;
173
174 struct {
175 struct list_head free;
176 struct list_head pending;
177 struct list_head active;
178 struct list_head done;
179 struct list_head wait;
180 struct rcar_dmac_desc *running;
181
182 struct list_head chunks_free;
183
184 struct list_head pages;
185 } desc;
186};
187
188#define to_rcar_dmac_chan(c) container_of(c, struct rcar_dmac_chan, chan)
189
190/*
191 * struct rcar_dmac - R-Car Gen2 DMA Controller
192 * @engine: base DMA engine object
193 * @dev: the hardware device
194 * @iomem: remapped I/O memory base
195 * @n_channels: number of available channels
196 * @channels: array of DMAC channels
197 * @modules: bitmask of client modules in use
198 */
199struct rcar_dmac {
200 struct dma_device engine;
201 struct device *dev;
202 void __iomem *iomem;
203
204 unsigned int n_channels;
205 struct rcar_dmac_chan *channels;
206
Joe Perches08acf382015-05-19 18:37:50 -0700207 DECLARE_BITMAP(modules, 256);
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200208};
209
210#define to_rcar_dmac(d) container_of(d, struct rcar_dmac, engine)
211
212/* -----------------------------------------------------------------------------
213 * Registers
214 */
215
216#define RCAR_DMAC_CHAN_OFFSET(i) (0x8000 + 0x80 * (i))
217
218#define RCAR_DMAISTA 0x0020
219#define RCAR_DMASEC 0x0030
220#define RCAR_DMAOR 0x0060
221#define RCAR_DMAOR_PRI_FIXED (0 << 8)
222#define RCAR_DMAOR_PRI_ROUND_ROBIN (3 << 8)
223#define RCAR_DMAOR_AE (1 << 2)
224#define RCAR_DMAOR_DME (1 << 0)
225#define RCAR_DMACHCLR 0x0080
226#define RCAR_DMADPSEC 0x00a0
227
228#define RCAR_DMASAR 0x0000
229#define RCAR_DMADAR 0x0004
230#define RCAR_DMATCR 0x0008
231#define RCAR_DMATCR_MASK 0x00ffffff
232#define RCAR_DMATSR 0x0028
233#define RCAR_DMACHCR 0x000c
234#define RCAR_DMACHCR_CAE (1 << 31)
235#define RCAR_DMACHCR_CAIE (1 << 30)
236#define RCAR_DMACHCR_DPM_DISABLED (0 << 28)
237#define RCAR_DMACHCR_DPM_ENABLED (1 << 28)
238#define RCAR_DMACHCR_DPM_REPEAT (2 << 28)
239#define RCAR_DMACHCR_DPM_INFINITE (3 << 28)
240#define RCAR_DMACHCR_RPT_SAR (1 << 27)
241#define RCAR_DMACHCR_RPT_DAR (1 << 26)
242#define RCAR_DMACHCR_RPT_TCR (1 << 25)
243#define RCAR_DMACHCR_DPB (1 << 22)
244#define RCAR_DMACHCR_DSE (1 << 19)
245#define RCAR_DMACHCR_DSIE (1 << 18)
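/*
 * The transfer size (TS) field is split across two bit ranges of CHCR, which
 * is why each TS_* value below combines a component shifted by 20 with a
 * component shifted by 3.
 */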
246#define RCAR_DMACHCR_TS_1B ((0 << 20) | (0 << 3))
247#define RCAR_DMACHCR_TS_2B ((0 << 20) | (1 << 3))
248#define RCAR_DMACHCR_TS_4B ((0 << 20) | (2 << 3))
249#define RCAR_DMACHCR_TS_16B ((0 << 20) | (3 << 3))
250#define RCAR_DMACHCR_TS_32B ((1 << 20) | (0 << 3))
251#define RCAR_DMACHCR_TS_64B ((1 << 20) | (1 << 3))
252#define RCAR_DMACHCR_TS_8B ((1 << 20) | (3 << 3))
253#define RCAR_DMACHCR_DM_FIXED (0 << 14)
254#define RCAR_DMACHCR_DM_INC (1 << 14)
255#define RCAR_DMACHCR_DM_DEC (2 << 14)
256#define RCAR_DMACHCR_SM_FIXED (0 << 12)
257#define RCAR_DMACHCR_SM_INC (1 << 12)
258#define RCAR_DMACHCR_SM_DEC (2 << 12)
259#define RCAR_DMACHCR_RS_AUTO (4 << 8)
260#define RCAR_DMACHCR_RS_DMARS (8 << 8)
261#define RCAR_DMACHCR_IE (1 << 2)
262#define RCAR_DMACHCR_TE (1 << 1)
263#define RCAR_DMACHCR_DE (1 << 0)
264#define RCAR_DMATCRB 0x0018
265#define RCAR_DMATSRB 0x0038
266#define RCAR_DMACHCRB 0x001c
267#define RCAR_DMACHCRB_DCNT(n) ((n) << 24)
Laurent Pinchartccadee92014-07-16 23:15:48 +0200268#define RCAR_DMACHCRB_DPTR_MASK (0xff << 16)
269#define RCAR_DMACHCRB_DPTR_SHIFT 16
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200270#define RCAR_DMACHCRB_DRST (1 << 15)
271#define RCAR_DMACHCRB_DTS (1 << 8)
272#define RCAR_DMACHCRB_SLM_NORMAL (0 << 4)
273#define RCAR_DMACHCRB_SLM_CLK(n) ((8 | (n)) << 4)
274#define RCAR_DMACHCRB_PRI(n) ((n) << 0)
275#define RCAR_DMARS 0x0040
276#define RCAR_DMABUFCR 0x0048
277#define RCAR_DMABUFCR_MBU(n) ((n) << 16)
278#define RCAR_DMABUFCR_ULB(n) ((n) << 0)
279#define RCAR_DMADPBASE 0x0050
280#define RCAR_DMADPBASE_MASK 0xfffffff0
281#define RCAR_DMADPBASE_SEL (1 << 0)
282#define RCAR_DMADPCR 0x0054
283#define RCAR_DMADPCR_DIPT(n) ((n) << 24)
284#define RCAR_DMAFIXSAR 0x0010
285#define RCAR_DMAFIXDAR 0x0014
286#define RCAR_DMAFIXDPBASE 0x0060
287
288/* Hardcode the MEMCPY transfer size to 4 bytes. */
289#define RCAR_DMAC_MEMCPY_XFER_SIZE 4
290
291/* -----------------------------------------------------------------------------
292 * Device access
293 */
294
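/*
 * DMAOR (and, in the per-channel helpers below, DMARS) is accessed as a
 * 16-bit register; all other registers are accessed as 32-bit registers,
 * hence the readw()/writew() special cases.
 */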
295static void rcar_dmac_write(struct rcar_dmac *dmac, u32 reg, u32 data)
296{
297 if (reg == RCAR_DMAOR)
298 writew(data, dmac->iomem + reg);
299 else
300 writel(data, dmac->iomem + reg);
301}
302
303static u32 rcar_dmac_read(struct rcar_dmac *dmac, u32 reg)
304{
305 if (reg == RCAR_DMAOR)
306 return readw(dmac->iomem + reg);
307 else
308 return readl(dmac->iomem + reg);
309}
310
311static u32 rcar_dmac_chan_read(struct rcar_dmac_chan *chan, u32 reg)
312{
313 if (reg == RCAR_DMARS)
314 return readw(chan->iomem + reg);
315 else
316 return readl(chan->iomem + reg);
317}
318
319static void rcar_dmac_chan_write(struct rcar_dmac_chan *chan, u32 reg, u32 data)
320{
321 if (reg == RCAR_DMARS)
322 writew(data, chan->iomem + reg);
323 else
324 writel(data, chan->iomem + reg);
325}
326
327/* -----------------------------------------------------------------------------
328 * Initialization and configuration
329 */
330
331static bool rcar_dmac_chan_is_busy(struct rcar_dmac_chan *chan)
332{
333 u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
334
Niklas Söderlund0f78e3b2016-06-30 17:15:16 +0200335 return !!(chcr & (RCAR_DMACHCR_DE | RCAR_DMACHCR_TE));
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200336}
337
338static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan)
339{
340 struct rcar_dmac_desc *desc = chan->desc.running;
Laurent Pinchartccadee92014-07-16 23:15:48 +0200341 u32 chcr = desc->chcr;
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200342
343 WARN_ON_ONCE(rcar_dmac_chan_is_busy(chan));
344
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200345 if (chan->mid_rid >= 0)
346 rcar_dmac_chan_write(chan, RCAR_DMARS, chan->mid_rid);
347
Laurent Pinchart1ed13152014-07-19 00:05:14 +0200348 if (desc->hwdescs.use) {
Kuninori Morimoto1175f832017-03-22 04:22:36 +0000349 struct rcar_dmac_xfer_chunk *chunk =
350 list_first_entry(&desc->chunks,
351 struct rcar_dmac_xfer_chunk, node);
Laurent Pinchart3f463062015-01-27 18:33:29 +0200352
Laurent Pinchartccadee92014-07-16 23:15:48 +0200353 dev_dbg(chan->chan.device->dev,
354 "chan%u: queue desc %p: %u@%pad\n",
355 chan->index, desc, desc->nchunks, &desc->hwdescs.dma);
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200356
Laurent Pinchartccadee92014-07-16 23:15:48 +0200357#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
Kuninori Morimoto1175f832017-03-22 04:22:36 +0000358 rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR,
359 chunk->src_addr >> 32);
360 rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR,
361 chunk->dst_addr >> 32);
Laurent Pinchartccadee92014-07-16 23:15:48 +0200362 rcar_dmac_chan_write(chan, RCAR_DMAFIXDPBASE,
363 desc->hwdescs.dma >> 32);
364#endif
365 rcar_dmac_chan_write(chan, RCAR_DMADPBASE,
366 (desc->hwdescs.dma & 0xfffffff0) |
367 RCAR_DMADPBASE_SEL);
368 rcar_dmac_chan_write(chan, RCAR_DMACHCRB,
369 RCAR_DMACHCRB_DCNT(desc->nchunks - 1) |
370 RCAR_DMACHCRB_DRST);
371
		/*
		 * Errata: When descriptor memory is accessed through an IOMMU
		 * the DMADAR register isn't initialized automatically from the
		 * first descriptor at the beginning of the transfer by the
		 * DMAC as it should be. Initialize it manually with the
		 * destination address of the first chunk.
		 */
Laurent Pinchart3f463062015-01-27 18:33:29 +0200379 rcar_dmac_chan_write(chan, RCAR_DMADAR,
380 chunk->dst_addr & 0xffffffff);
381
382 /*
Laurent Pinchartccadee92014-07-16 23:15:48 +0200383 * Program the descriptor stage interrupt to occur after the end
384 * of the first stage.
385 */
386 rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(1));
387
388 chcr |= RCAR_DMACHCR_RPT_SAR | RCAR_DMACHCR_RPT_DAR
389 | RCAR_DMACHCR_RPT_TCR | RCAR_DMACHCR_DPB;
390
391 /*
392 * If the descriptor isn't cyclic enable normal descriptor mode
393 * and the transfer completion interrupt.
394 */
395 if (!desc->cyclic)
396 chcr |= RCAR_DMACHCR_DPM_ENABLED | RCAR_DMACHCR_IE;
397 /*
398 * If the descriptor is cyclic and has a callback enable the
399 * descriptor stage interrupt in infinite repeat mode.
400 */
401 else if (desc->async_tx.callback)
402 chcr |= RCAR_DMACHCR_DPM_INFINITE | RCAR_DMACHCR_DSIE;
403 /*
404 * Otherwise just select infinite repeat mode without any
405 * interrupt.
406 */
407 else
408 chcr |= RCAR_DMACHCR_DPM_INFINITE;
409 } else {
410 struct rcar_dmac_xfer_chunk *chunk = desc->running;
411
412 dev_dbg(chan->chan.device->dev,
413 "chan%u: queue chunk %p: %u@%pad -> %pad\n",
414 chan->index, chunk, chunk->size, &chunk->src_addr,
415 &chunk->dst_addr);
416
417#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
418 rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR,
419 chunk->src_addr >> 32);
420 rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR,
421 chunk->dst_addr >> 32);
422#endif
423 rcar_dmac_chan_write(chan, RCAR_DMASAR,
424 chunk->src_addr & 0xffffffff);
425 rcar_dmac_chan_write(chan, RCAR_DMADAR,
426 chunk->dst_addr & 0xffffffff);
427 rcar_dmac_chan_write(chan, RCAR_DMATCR,
428 chunk->size >> desc->xfer_shift);
429
430 chcr |= RCAR_DMACHCR_DPM_DISABLED | RCAR_DMACHCR_IE;
431 }
432
433 rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr | RCAR_DMACHCR_DE);
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200434}
435
436static int rcar_dmac_init(struct rcar_dmac *dmac)
437{
438 u16 dmaor;
439
440 /* Clear all channels and enable the DMAC globally. */
Kuninori Morimoto20c169a2016-03-03 17:25:53 +0900441 rcar_dmac_write(dmac, RCAR_DMACHCLR, GENMASK(dmac->n_channels - 1, 0));
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200442 rcar_dmac_write(dmac, RCAR_DMAOR,
443 RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME);
444
445 dmaor = rcar_dmac_read(dmac, RCAR_DMAOR);
446 if ((dmaor & (RCAR_DMAOR_AE | RCAR_DMAOR_DME)) != RCAR_DMAOR_DME) {
447 dev_warn(dmac->dev, "DMAOR initialization failed.\n");
448 return -EIO;
449 }
450
451 return 0;
452}
453
454/* -----------------------------------------------------------------------------
455 * Descriptors submission
456 */
457
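/*
 * tx_submit only assigns a cookie and queues the descriptor on the pending
 * list; nothing is written to the hardware at this point. The transfer is
 * started later from rcar_dmac_issue_pending(), which moves pending
 * descriptors to the active list and starts the first one if the channel is
 * idle.
 */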
458static dma_cookie_t rcar_dmac_tx_submit(struct dma_async_tx_descriptor *tx)
459{
460 struct rcar_dmac_chan *chan = to_rcar_dmac_chan(tx->chan);
461 struct rcar_dmac_desc *desc = to_rcar_dmac_desc(tx);
462 unsigned long flags;
463 dma_cookie_t cookie;
464
465 spin_lock_irqsave(&chan->lock, flags);
466
467 cookie = dma_cookie_assign(tx);
468
469 dev_dbg(chan->chan.device->dev, "chan%u: submit #%d@%p\n",
470 chan->index, tx->cookie, desc);
471
472 list_add_tail(&desc->node, &chan->desc.pending);
473 desc->running = list_first_entry(&desc->chunks,
474 struct rcar_dmac_xfer_chunk, node);
475
476 spin_unlock_irqrestore(&chan->lock, flags);
477
478 return cookie;
479}
480
481/* -----------------------------------------------------------------------------
482 * Descriptors allocation and free
483 */
484
485/*
486 * rcar_dmac_desc_alloc - Allocate a page worth of DMA descriptors
487 * @chan: the DMA channel
488 * @gfp: allocation flags
489 */
490static int rcar_dmac_desc_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
491{
492 struct rcar_dmac_desc_page *page;
Kuninori Morimotod23c9a02015-05-21 03:48:38 +0000493 unsigned long flags;
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200494 LIST_HEAD(list);
495 unsigned int i;
496
497 page = (void *)get_zeroed_page(gfp);
498 if (!page)
499 return -ENOMEM;
500
501 for (i = 0; i < RCAR_DMAC_DESCS_PER_PAGE; ++i) {
502 struct rcar_dmac_desc *desc = &page->descs[i];
503
504 dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
505 desc->async_tx.tx_submit = rcar_dmac_tx_submit;
506 INIT_LIST_HEAD(&desc->chunks);
507
508 list_add_tail(&desc->node, &list);
509 }
510
Kuninori Morimotod23c9a02015-05-21 03:48:38 +0000511 spin_lock_irqsave(&chan->lock, flags);
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200512 list_splice_tail(&list, &chan->desc.free);
513 list_add_tail(&page->node, &chan->desc.pages);
Kuninori Morimotod23c9a02015-05-21 03:48:38 +0000514 spin_unlock_irqrestore(&chan->lock, flags);
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200515
516 return 0;
517}
518
519/*
520 * rcar_dmac_desc_put - Release a DMA transfer descriptor
521 * @chan: the DMA channel
522 * @desc: the descriptor
523 *
 * Put the descriptor and its transfer chunk descriptors back in the channel's
 * free descriptor lists. The descriptor's chunks list is reinitialized to an
 * empty list as a result.
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200527 *
Laurent Pinchartccadee92014-07-16 23:15:48 +0200528 * The descriptor must have been removed from the channel's lists before calling
529 * this function.
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200530 */
531static void rcar_dmac_desc_put(struct rcar_dmac_chan *chan,
532 struct rcar_dmac_desc *desc)
533{
Laurent Pinchartf3915072015-01-27 15:52:13 +0200534 unsigned long flags;
535
536 spin_lock_irqsave(&chan->lock, flags);
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200537 list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free);
Kuninori Morimoto3565fe52016-05-30 00:41:48 +0000538 list_add(&desc->node, &chan->desc.free);
Laurent Pinchartf3915072015-01-27 15:52:13 +0200539 spin_unlock_irqrestore(&chan->lock, flags);
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200540}
541
542static void rcar_dmac_desc_recycle_acked(struct rcar_dmac_chan *chan)
543{
544 struct rcar_dmac_desc *desc, *_desc;
Kuninori Morimotod23c9a02015-05-21 03:48:38 +0000545 unsigned long flags;
Laurent Pinchartccadee92014-07-16 23:15:48 +0200546 LIST_HEAD(list);
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200547
Laurent Pinchartccadee92014-07-16 23:15:48 +0200548 /*
549 * We have to temporarily move all descriptors from the wait list to a
550 * local list as iterating over the wait list, even with
551 * list_for_each_entry_safe, isn't safe if we release the channel lock
552 * around the rcar_dmac_desc_put() call.
553 */
Kuninori Morimotod23c9a02015-05-21 03:48:38 +0000554 spin_lock_irqsave(&chan->lock, flags);
Laurent Pinchartccadee92014-07-16 23:15:48 +0200555 list_splice_init(&chan->desc.wait, &list);
Kuninori Morimotod23c9a02015-05-21 03:48:38 +0000556 spin_unlock_irqrestore(&chan->lock, flags);
Laurent Pinchartccadee92014-07-16 23:15:48 +0200557
558 list_for_each_entry_safe(desc, _desc, &list, node) {
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200559 if (async_tx_test_ack(&desc->async_tx)) {
560 list_del(&desc->node);
561 rcar_dmac_desc_put(chan, desc);
562 }
563 }
Laurent Pinchartccadee92014-07-16 23:15:48 +0200564
565 if (list_empty(&list))
566 return;
567
568 /* Put the remaining descriptors back in the wait list. */
Kuninori Morimotod23c9a02015-05-21 03:48:38 +0000569 spin_lock_irqsave(&chan->lock, flags);
Laurent Pinchartccadee92014-07-16 23:15:48 +0200570 list_splice(&list, &chan->desc.wait);
Kuninori Morimotod23c9a02015-05-21 03:48:38 +0000571 spin_unlock_irqrestore(&chan->lock, flags);
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200572}
573
574/*
575 * rcar_dmac_desc_get - Allocate a descriptor for a DMA transfer
576 * @chan: the DMA channel
577 *
578 * Locking: This function must be called in a non-atomic context.
579 *
580 * Return: A pointer to the allocated descriptor or NULL if no descriptor can
581 * be allocated.
582 */
583static struct rcar_dmac_desc *rcar_dmac_desc_get(struct rcar_dmac_chan *chan)
584{
585 struct rcar_dmac_desc *desc;
Kuninori Morimotod23c9a02015-05-21 03:48:38 +0000586 unsigned long flags;
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200587 int ret;
588
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200589 /* Recycle acked descriptors before attempting allocation. */
590 rcar_dmac_desc_recycle_acked(chan);
591
Kuninori Morimotod23c9a02015-05-21 03:48:38 +0000592 spin_lock_irqsave(&chan->lock, flags);
Laurent Pinchartccadee92014-07-16 23:15:48 +0200593
Laurent Pincharta55e07c2015-01-08 18:29:25 +0200594 while (list_empty(&chan->desc.free)) {
595 /*
596 * No free descriptors, allocate a page worth of them and try
597 * again, as someone else could race us to get the newly
598 * allocated descriptors. If the allocation fails return an
599 * error.
600 */
Kuninori Morimotod23c9a02015-05-21 03:48:38 +0000601 spin_unlock_irqrestore(&chan->lock, flags);
Laurent Pincharta55e07c2015-01-08 18:29:25 +0200602 ret = rcar_dmac_desc_alloc(chan, GFP_NOWAIT);
603 if (ret < 0)
604 return NULL;
Kuninori Morimotod23c9a02015-05-21 03:48:38 +0000605 spin_lock_irqsave(&chan->lock, flags);
Laurent Pincharta55e07c2015-01-08 18:29:25 +0200606 }
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200607
Laurent Pincharta55e07c2015-01-08 18:29:25 +0200608 desc = list_first_entry(&chan->desc.free, struct rcar_dmac_desc, node);
609 list_del(&desc->node);
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200610
Kuninori Morimotod23c9a02015-05-21 03:48:38 +0000611 spin_unlock_irqrestore(&chan->lock, flags);
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200612
613 return desc;
614}
615
616/*
617 * rcar_dmac_xfer_chunk_alloc - Allocate a page worth of transfer chunks
618 * @chan: the DMA channel
619 * @gfp: allocation flags
620 */
621static int rcar_dmac_xfer_chunk_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
622{
623 struct rcar_dmac_desc_page *page;
Kuninori Morimotod23c9a02015-05-21 03:48:38 +0000624 unsigned long flags;
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200625 LIST_HEAD(list);
626 unsigned int i;
627
628 page = (void *)get_zeroed_page(gfp);
629 if (!page)
630 return -ENOMEM;
631
632 for (i = 0; i < RCAR_DMAC_XFER_CHUNKS_PER_PAGE; ++i) {
633 struct rcar_dmac_xfer_chunk *chunk = &page->chunks[i];
634
635 list_add_tail(&chunk->node, &list);
636 }
637
Kuninori Morimotod23c9a02015-05-21 03:48:38 +0000638 spin_lock_irqsave(&chan->lock, flags);
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200639 list_splice_tail(&list, &chan->desc.chunks_free);
640 list_add_tail(&page->node, &chan->desc.pages);
Kuninori Morimotod23c9a02015-05-21 03:48:38 +0000641 spin_unlock_irqrestore(&chan->lock, flags);
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200642
643 return 0;
644}
645
646/*
647 * rcar_dmac_xfer_chunk_get - Allocate a transfer chunk for a DMA transfer
648 * @chan: the DMA channel
649 *
650 * Locking: This function must be called in a non-atomic context.
651 *
652 * Return: A pointer to the allocated transfer chunk descriptor or NULL if no
653 * descriptor can be allocated.
654 */
655static struct rcar_dmac_xfer_chunk *
656rcar_dmac_xfer_chunk_get(struct rcar_dmac_chan *chan)
657{
658 struct rcar_dmac_xfer_chunk *chunk;
Kuninori Morimotod23c9a02015-05-21 03:48:38 +0000659 unsigned long flags;
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200660 int ret;
661
Kuninori Morimotod23c9a02015-05-21 03:48:38 +0000662 spin_lock_irqsave(&chan->lock, flags);
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200663
Laurent Pincharta55e07c2015-01-08 18:29:25 +0200664 while (list_empty(&chan->desc.chunks_free)) {
665 /*
666 * No free descriptors, allocate a page worth of them and try
667 * again, as someone else could race us to get the newly
668 * allocated descriptors. If the allocation fails return an
669 * error.
670 */
Kuninori Morimotod23c9a02015-05-21 03:48:38 +0000671 spin_unlock_irqrestore(&chan->lock, flags);
Laurent Pincharta55e07c2015-01-08 18:29:25 +0200672 ret = rcar_dmac_xfer_chunk_alloc(chan, GFP_NOWAIT);
673 if (ret < 0)
674 return NULL;
Kuninori Morimotod23c9a02015-05-21 03:48:38 +0000675 spin_lock_irqsave(&chan->lock, flags);
Laurent Pincharta55e07c2015-01-08 18:29:25 +0200676 }
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200677
Laurent Pincharta55e07c2015-01-08 18:29:25 +0200678 chunk = list_first_entry(&chan->desc.chunks_free,
679 struct rcar_dmac_xfer_chunk, node);
680 list_del(&chunk->node);
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200681
Kuninori Morimotod23c9a02015-05-21 03:48:38 +0000682 spin_unlock_irqrestore(&chan->lock, flags);
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200683
684 return chunk;
685}
686
Laurent Pinchart1ed13152014-07-19 00:05:14 +0200687static void rcar_dmac_realloc_hwdesc(struct rcar_dmac_chan *chan,
688 struct rcar_dmac_desc *desc, size_t size)
689{
690 /*
691 * dma_alloc_coherent() allocates memory in page size increments. To
692 * avoid reallocating the hardware descriptors when the allocated size
693 * wouldn't change align the requested size to a multiple of the page
694 * size.
695 */
696 size = PAGE_ALIGN(size);
697
698 if (desc->hwdescs.size == size)
699 return;
700
701 if (desc->hwdescs.mem) {
Laurent Pinchart6a634802015-01-27 15:58:53 +0200702 dma_free_coherent(chan->chan.device->dev, desc->hwdescs.size,
703 desc->hwdescs.mem, desc->hwdescs.dma);
Laurent Pinchart1ed13152014-07-19 00:05:14 +0200704 desc->hwdescs.mem = NULL;
705 desc->hwdescs.size = 0;
706 }
707
708 if (!size)
709 return;
710
Laurent Pinchart6a634802015-01-27 15:58:53 +0200711 desc->hwdescs.mem = dma_alloc_coherent(chan->chan.device->dev, size,
712 &desc->hwdescs.dma, GFP_NOWAIT);
Laurent Pinchart1ed13152014-07-19 00:05:14 +0200713 if (!desc->hwdescs.mem)
714 return;
715
716 desc->hwdescs.size = size;
717}
718
Jürg Billeteree4b8762014-11-25 15:10:17 +0100719static int rcar_dmac_fill_hwdesc(struct rcar_dmac_chan *chan,
720 struct rcar_dmac_desc *desc)
Laurent Pinchartccadee92014-07-16 23:15:48 +0200721{
722 struct rcar_dmac_xfer_chunk *chunk;
723 struct rcar_dmac_hw_desc *hwdesc;
Laurent Pinchartccadee92014-07-16 23:15:48 +0200724
Laurent Pinchart1ed13152014-07-19 00:05:14 +0200725 rcar_dmac_realloc_hwdesc(chan, desc, desc->nchunks * sizeof(*hwdesc));
726
727 hwdesc = desc->hwdescs.mem;
Laurent Pinchartccadee92014-07-16 23:15:48 +0200728 if (!hwdesc)
Jürg Billeteree4b8762014-11-25 15:10:17 +0100729 return -ENOMEM;
Laurent Pinchartccadee92014-07-16 23:15:48 +0200730
Laurent Pinchartccadee92014-07-16 23:15:48 +0200731 list_for_each_entry(chunk, &desc->chunks, node) {
732 hwdesc->sar = chunk->src_addr;
733 hwdesc->dar = chunk->dst_addr;
734 hwdesc->tcr = chunk->size >> desc->xfer_shift;
735 hwdesc++;
736 }
Jürg Billeteree4b8762014-11-25 15:10:17 +0100737
738 return 0;
Laurent Pinchartccadee92014-07-16 23:15:48 +0200739}
740
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200741/* -----------------------------------------------------------------------------
742 * Stop and reset
743 */
744
745static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan)
746{
747 u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
748
Laurent Pinchartccadee92014-07-16 23:15:48 +0200749 chcr &= ~(RCAR_DMACHCR_DSE | RCAR_DMACHCR_DSIE | RCAR_DMACHCR_IE |
750 RCAR_DMACHCR_TE | RCAR_DMACHCR_DE);
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200751 rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr);
752}
753
754static void rcar_dmac_chan_reinit(struct rcar_dmac_chan *chan)
755{
756 struct rcar_dmac_desc *desc, *_desc;
757 unsigned long flags;
758 LIST_HEAD(descs);
759
760 spin_lock_irqsave(&chan->lock, flags);
761
762 /* Move all non-free descriptors to the local lists. */
763 list_splice_init(&chan->desc.pending, &descs);
764 list_splice_init(&chan->desc.active, &descs);
765 list_splice_init(&chan->desc.done, &descs);
766 list_splice_init(&chan->desc.wait, &descs);
767
768 chan->desc.running = NULL;
769
770 spin_unlock_irqrestore(&chan->lock, flags);
771
772 list_for_each_entry_safe(desc, _desc, &descs, node) {
773 list_del(&desc->node);
774 rcar_dmac_desc_put(chan, desc);
775 }
776}
777
778static void rcar_dmac_stop(struct rcar_dmac *dmac)
779{
780 rcar_dmac_write(dmac, RCAR_DMAOR, 0);
781}
782
783static void rcar_dmac_abort(struct rcar_dmac *dmac)
784{
785 unsigned int i;
786
787 /* Stop all channels. */
788 for (i = 0; i < dmac->n_channels; ++i) {
789 struct rcar_dmac_chan *chan = &dmac->channels[i];
790
791 /* Stop and reinitialize the channel. */
792 spin_lock(&chan->lock);
793 rcar_dmac_chan_halt(chan);
794 spin_unlock(&chan->lock);
795
796 rcar_dmac_chan_reinit(chan);
797 }
798}
799
800/* -----------------------------------------------------------------------------
801 * Descriptors preparation
802 */
803
804static void rcar_dmac_chan_configure_desc(struct rcar_dmac_chan *chan,
805 struct rcar_dmac_desc *desc)
806{
807 static const u32 chcr_ts[] = {
808 RCAR_DMACHCR_TS_1B, RCAR_DMACHCR_TS_2B,
809 RCAR_DMACHCR_TS_4B, RCAR_DMACHCR_TS_8B,
810 RCAR_DMACHCR_TS_16B, RCAR_DMACHCR_TS_32B,
811 RCAR_DMACHCR_TS_64B,
812 };
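	/*
	 * chcr_ts[] is ordered so that its index equals log2 of the transfer
	 * size in bytes, allowing desc->xfer_shift (computed with ilog2()
	 * below) to index it directly.
	 */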
813
814 unsigned int xfer_size;
815 u32 chcr;
816
817 switch (desc->direction) {
818 case DMA_DEV_TO_MEM:
819 chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_FIXED
820 | RCAR_DMACHCR_RS_DMARS;
Niklas Söderlundc5ed08e2016-08-10 13:22:18 +0200821 xfer_size = chan->src.xfer_size;
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200822 break;
823
824 case DMA_MEM_TO_DEV:
825 chcr = RCAR_DMACHCR_DM_FIXED | RCAR_DMACHCR_SM_INC
826 | RCAR_DMACHCR_RS_DMARS;
Niklas Söderlundc5ed08e2016-08-10 13:22:18 +0200827 xfer_size = chan->dst.xfer_size;
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200828 break;
829
830 case DMA_MEM_TO_MEM:
831 default:
832 chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_INC
833 | RCAR_DMACHCR_RS_AUTO;
834 xfer_size = RCAR_DMAC_MEMCPY_XFER_SIZE;
835 break;
836 }
837
838 desc->xfer_shift = ilog2(xfer_size);
839 desc->chcr = chcr | chcr_ts[desc->xfer_shift];
840}
841
842/*
843 * rcar_dmac_chan_prep_sg - prepare transfer descriptors from an SG list
844 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and correct
 * list manipulation. For slave DMA, direction carries the usual meaning and,
 * logically, the SG list maps RAM while the addr variable holds the slave
 * address, e.g. the FIFO I/O register. For MEMCPY, direction equals
 * DMA_MEM_TO_MEM and the SG list contains a single element pointing at the
 * source buffer.
851 */
852static struct dma_async_tx_descriptor *
853rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
854 unsigned int sg_len, dma_addr_t dev_addr,
855 enum dma_transfer_direction dir, unsigned long dma_flags,
856 bool cyclic)
857{
858 struct rcar_dmac_xfer_chunk *chunk;
859 struct rcar_dmac_desc *desc;
860 struct scatterlist *sg;
Laurent Pinchartccadee92014-07-16 23:15:48 +0200861 unsigned int nchunks = 0;
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200862 unsigned int max_chunk_size;
863 unsigned int full_size = 0;
Kuninori Morimoto1175f832017-03-22 04:22:36 +0000864 bool cross_boundary = false;
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200865 unsigned int i;
Kuninori Morimoto1175f832017-03-22 04:22:36 +0000866#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
867 u32 high_dev_addr;
868 u32 high_mem_addr;
869#endif
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200870
871 desc = rcar_dmac_desc_get(chan);
872 if (!desc)
873 return NULL;
874
875 desc->async_tx.flags = dma_flags;
876 desc->async_tx.cookie = -EBUSY;
877
878 desc->cyclic = cyclic;
879 desc->direction = dir;
880
881 rcar_dmac_chan_configure_desc(chan, desc);
882
883 max_chunk_size = (RCAR_DMATCR_MASK + 1) << desc->xfer_shift;
884
885 /*
	 * Allocate and fill the transfer chunk descriptors. We own the only
	 * reference to the DMA descriptor, so there's no need for locking.
888 */
889 for_each_sg(sgl, sg, sg_len, i) {
890 dma_addr_t mem_addr = sg_dma_address(sg);
891 unsigned int len = sg_dma_len(sg);
892
893 full_size += len;
894
Kuninori Morimoto1175f832017-03-22 04:22:36 +0000895#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
896 if (i == 0) {
897 high_dev_addr = dev_addr >> 32;
898 high_mem_addr = mem_addr >> 32;
899 }
900
901 if ((dev_addr >> 32 != high_dev_addr) ||
902 (mem_addr >> 32 != high_mem_addr))
903 cross_boundary = true;
904#endif
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200905 while (len) {
906 unsigned int size = min(len, max_chunk_size);
907
908#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
909 /*
910 * Prevent individual transfers from crossing 4GB
911 * boundaries.
912 */
Kuninori Morimoto1175f832017-03-22 04:22:36 +0000913 if (dev_addr >> 32 != (dev_addr + size - 1) >> 32) {
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200914 size = ALIGN(dev_addr, 1ULL << 32) - dev_addr;
Kuninori Morimoto1175f832017-03-22 04:22:36 +0000915 cross_boundary = true;
916 }
917 if (mem_addr >> 32 != (mem_addr + size - 1) >> 32) {
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200918 size = ALIGN(mem_addr, 1ULL << 32) - mem_addr;
Kuninori Morimoto1175f832017-03-22 04:22:36 +0000919 cross_boundary = true;
920 }
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200921#endif
922
923 chunk = rcar_dmac_xfer_chunk_get(chan);
924 if (!chunk) {
925 rcar_dmac_desc_put(chan, desc);
926 return NULL;
927 }
928
929 if (dir == DMA_DEV_TO_MEM) {
930 chunk->src_addr = dev_addr;
931 chunk->dst_addr = mem_addr;
932 } else {
933 chunk->src_addr = mem_addr;
934 chunk->dst_addr = dev_addr;
935 }
936
937 chunk->size = size;
938
939 dev_dbg(chan->chan.device->dev,
940 "chan%u: chunk %p/%p sgl %u@%p, %u/%u %pad -> %pad\n",
941 chan->index, chunk, desc, i, sg, size, len,
942 &chunk->src_addr, &chunk->dst_addr);
943
944 mem_addr += size;
945 if (dir == DMA_MEM_TO_MEM)
946 dev_addr += size;
947
948 len -= size;
949
950 list_add_tail(&chunk->node, &desc->chunks);
Laurent Pinchartccadee92014-07-16 23:15:48 +0200951 nchunks++;
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200952 }
953 }
954
Laurent Pinchartccadee92014-07-16 23:15:48 +0200955 desc->nchunks = nchunks;
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200956 desc->size = full_size;
957
Laurent Pinchartccadee92014-07-16 23:15:48 +0200958 /*
959 * Use hardware descriptor lists if possible when more than one chunk
960 * needs to be transferred (otherwise they don't make much sense).
961 *
	 * The source and destination addresses must be located in the same
	 * 4GiB region of the 40-bit address space when hardware descriptors
	 * are used; cross_boundary tracks whether that constraint is violated.
	 */
Kuninori Morimoto1175f832017-03-22 04:22:36 +0000966 desc->hwdescs.use = !cross_boundary && nchunks > 1;
Jürg Billeteree4b8762014-11-25 15:10:17 +0100967 if (desc->hwdescs.use) {
968 if (rcar_dmac_fill_hwdesc(chan, desc) < 0)
969 desc->hwdescs.use = false;
970 }
Laurent Pinchartccadee92014-07-16 23:15:48 +0200971
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200972 return &desc->async_tx;
973}
974
975/* -----------------------------------------------------------------------------
976 * DMA engine operations
977 */
978
979static int rcar_dmac_alloc_chan_resources(struct dma_chan *chan)
980{
981 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
982 int ret;
983
Laurent Pinchart87244fe2014-07-09 00:42:19 +0200984 INIT_LIST_HEAD(&rchan->desc.chunks_free);
985 INIT_LIST_HEAD(&rchan->desc.pages);
986
987 /* Preallocate descriptors. */
988 ret = rcar_dmac_xfer_chunk_alloc(rchan, GFP_KERNEL);
989 if (ret < 0)
990 return -ENOMEM;
991
992 ret = rcar_dmac_desc_alloc(rchan, GFP_KERNEL);
993 if (ret < 0)
994 return -ENOMEM;
995
996 return pm_runtime_get_sync(chan->device->dev);
997}
998
999static void rcar_dmac_free_chan_resources(struct dma_chan *chan)
1000{
1001 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1002 struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
Niklas Söderlund3139dc82017-01-11 15:39:31 +01001003 struct rcar_dmac_chan_map *map = &rchan->map;
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001004 struct rcar_dmac_desc_page *page, *_page;
Laurent Pinchart1ed13152014-07-19 00:05:14 +02001005 struct rcar_dmac_desc *desc;
1006 LIST_HEAD(list);
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001007
1008 /* Protect against ISR */
1009 spin_lock_irq(&rchan->lock);
1010 rcar_dmac_chan_halt(rchan);
1011 spin_unlock_irq(&rchan->lock);
1012
Niklas Söderlunda1ed64e2017-05-16 01:09:17 +02001013 /*
1014 * Now no new interrupts will occur, but one might already be
1015 * running. Wait for it to finish before freeing resources.
1016 */
1017 synchronize_irq(rchan->irq);
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001018
1019 if (rchan->mid_rid >= 0) {
1020 /* The caller is holding dma_list_mutex */
1021 clear_bit(rchan->mid_rid, dmac->modules);
1022 rchan->mid_rid = -EINVAL;
1023 }
1024
Laurent Pinchartf7638c92015-01-27 15:58:53 +02001025 list_splice_init(&rchan->desc.free, &list);
1026 list_splice_init(&rchan->desc.pending, &list);
1027 list_splice_init(&rchan->desc.active, &list);
1028 list_splice_init(&rchan->desc.done, &list);
1029 list_splice_init(&rchan->desc.wait, &list);
Laurent Pinchart1ed13152014-07-19 00:05:14 +02001030
Muhammad Hamza Farooq48c73652016-06-30 17:15:17 +02001031 rchan->desc.running = NULL;
1032
Laurent Pinchart1ed13152014-07-19 00:05:14 +02001033 list_for_each_entry(desc, &list, node)
1034 rcar_dmac_realloc_hwdesc(rchan, desc, 0);
1035
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001036 list_for_each_entry_safe(page, _page, &rchan->desc.pages, node) {
1037 list_del(&page->node);
1038 free_page((unsigned long)page);
1039 }
1040
Niklas Söderlund3139dc82017-01-11 15:39:31 +01001041 /* Remove slave mapping if present. */
1042 if (map->slave.xfer_size) {
1043 dma_unmap_resource(chan->device->dev, map->addr,
1044 map->slave.xfer_size, map->dir, 0);
1045 map->slave.xfer_size = 0;
1046 }
1047
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001048 pm_runtime_put(chan->device->dev);
1049}
1050
1051static struct dma_async_tx_descriptor *
1052rcar_dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
1053 dma_addr_t dma_src, size_t len, unsigned long flags)
1054{
1055 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1056 struct scatterlist sgl;
1057
1058 if (!len)
1059 return NULL;
1060
1061 sg_init_table(&sgl, 1);
1062 sg_set_page(&sgl, pfn_to_page(PFN_DOWN(dma_src)), len,
1063 offset_in_page(dma_src));
1064 sg_dma_address(&sgl) = dma_src;
1065 sg_dma_len(&sgl) = len;
1066
1067 return rcar_dmac_chan_prep_sg(rchan, &sgl, 1, dma_dest,
1068 DMA_MEM_TO_MEM, flags, false);
1069}
1070
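/*
 * Slave transfers target a device FIFO rather than system memory, so the
 * slave's physical address has to be mapped with dma_map_resource() before
 * the DMAC can address it (in particular when the DMAC sits behind an IOMMU).
 * The mapping is cached in rchan->map and only redone when the address, size
 * or direction changes.
 */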
Niklas Söderlund9f878602016-08-10 13:22:19 +02001071static int rcar_dmac_map_slave_addr(struct dma_chan *chan,
1072 enum dma_transfer_direction dir)
1073{
1074 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1075 struct rcar_dmac_chan_map *map = &rchan->map;
1076 phys_addr_t dev_addr;
1077 size_t dev_size;
1078 enum dma_data_direction dev_dir;
1079
1080 if (dir == DMA_DEV_TO_MEM) {
1081 dev_addr = rchan->src.slave_addr;
1082 dev_size = rchan->src.xfer_size;
1083 dev_dir = DMA_TO_DEVICE;
1084 } else {
1085 dev_addr = rchan->dst.slave_addr;
1086 dev_size = rchan->dst.xfer_size;
1087 dev_dir = DMA_FROM_DEVICE;
1088 }
1089
1090 /* Reuse current map if possible. */
1091 if (dev_addr == map->slave.slave_addr &&
1092 dev_size == map->slave.xfer_size &&
1093 dev_dir == map->dir)
1094 return 0;
1095
1096 /* Remove old mapping if present. */
1097 if (map->slave.xfer_size)
1098 dma_unmap_resource(chan->device->dev, map->addr,
1099 map->slave.xfer_size, map->dir, 0);
1100 map->slave.xfer_size = 0;
1101
1102 /* Create new slave address map. */
1103 map->addr = dma_map_resource(chan->device->dev, dev_addr, dev_size,
1104 dev_dir, 0);
1105
1106 if (dma_mapping_error(chan->device->dev, map->addr)) {
1107 dev_err(chan->device->dev,
1108 "chan%u: failed to map %zx@%pap", rchan->index,
1109 dev_size, &dev_addr);
1110 return -EIO;
1111 }
1112
1113 dev_dbg(chan->device->dev, "chan%u: map %zx@%pap to %pad dir: %s\n",
1114 rchan->index, dev_size, &dev_addr, &map->addr,
1115 dev_dir == DMA_TO_DEVICE ? "DMA_TO_DEVICE" : "DMA_FROM_DEVICE");
1116
1117 map->slave.slave_addr = dev_addr;
1118 map->slave.xfer_size = dev_size;
1119 map->dir = dev_dir;
1120
1121 return 0;
1122}
1123
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001124static struct dma_async_tx_descriptor *
1125rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
1126 unsigned int sg_len, enum dma_transfer_direction dir,
1127 unsigned long flags, void *context)
1128{
1129 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001130
1131 /* Someone calling slave DMA on a generic channel? */
1132 if (rchan->mid_rid < 0 || !sg_len) {
1133 dev_warn(chan->device->dev,
1134 "%s: bad parameter: len=%d, id=%d\n",
1135 __func__, sg_len, rchan->mid_rid);
1136 return NULL;
1137 }
1138
Niklas Söderlund9f878602016-08-10 13:22:19 +02001139 if (rcar_dmac_map_slave_addr(chan, dir))
1140 return NULL;
1141
1142 return rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr,
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001143 dir, flags, false);
1144}
1145
1146#define RCAR_DMAC_MAX_SG_LEN 32
1147
1148static struct dma_async_tx_descriptor *
1149rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
1150 size_t buf_len, size_t period_len,
1151 enum dma_transfer_direction dir, unsigned long flags)
1152{
1153 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1154 struct dma_async_tx_descriptor *desc;
1155 struct scatterlist *sgl;
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001156 unsigned int sg_len;
1157 unsigned int i;
1158
1159 /* Someone calling slave DMA on a generic channel? */
1160 if (rchan->mid_rid < 0 || buf_len < period_len) {
1161 dev_warn(chan->device->dev,
1162 "%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n",
1163 __func__, buf_len, period_len, rchan->mid_rid);
1164 return NULL;
1165 }
1166
Niklas Söderlund9f878602016-08-10 13:22:19 +02001167 if (rcar_dmac_map_slave_addr(chan, dir))
1168 return NULL;
1169
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001170 sg_len = buf_len / period_len;
1171 if (sg_len > RCAR_DMAC_MAX_SG_LEN) {
1172 dev_err(chan->device->dev,
1173 "chan%u: sg length %d exceds limit %d",
1174 rchan->index, sg_len, RCAR_DMAC_MAX_SG_LEN);
1175 return NULL;
1176 }
1177
1178 /*
1179 * Allocate the sg list dynamically as it would consume too much stack
1180 * space.
1181 */
1182 sgl = kcalloc(sg_len, sizeof(*sgl), GFP_NOWAIT);
1183 if (!sgl)
1184 return NULL;
1185
1186 sg_init_table(sgl, sg_len);
1187
1188 for (i = 0; i < sg_len; ++i) {
1189 dma_addr_t src = buf_addr + (period_len * i);
1190
1191 sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len,
1192 offset_in_page(src));
1193 sg_dma_address(&sgl[i]) = src;
1194 sg_dma_len(&sgl[i]) = period_len;
1195 }
1196
Niklas Söderlund9f878602016-08-10 13:22:19 +02001197 desc = rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr,
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001198 dir, flags, true);
1199
1200 kfree(sgl);
1201 return desc;
1202}
1203
1204static int rcar_dmac_device_config(struct dma_chan *chan,
1205 struct dma_slave_config *cfg)
1206{
1207 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1208
1209 /*
	 * We could lock this, but you shouldn't be configuring the
	 * channel while using it...
1212 */
Niklas Söderlundc5ed08e2016-08-10 13:22:18 +02001213 rchan->src.slave_addr = cfg->src_addr;
1214 rchan->dst.slave_addr = cfg->dst_addr;
1215 rchan->src.xfer_size = cfg->src_addr_width;
1216 rchan->dst.xfer_size = cfg->dst_addr_width;
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001217
1218 return 0;
1219}
1220
1221static int rcar_dmac_chan_terminate_all(struct dma_chan *chan)
1222{
1223 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1224 unsigned long flags;
1225
1226 spin_lock_irqsave(&rchan->lock, flags);
1227 rcar_dmac_chan_halt(rchan);
1228 spin_unlock_irqrestore(&rchan->lock, flags);
1229
1230 /*
1231 * FIXME: No new interrupt can occur now, but the IRQ thread might still
1232 * be running.
1233 */
1234
1235 rcar_dmac_chan_reinit(rchan);
1236
1237 return 0;
1238}
1239
1240static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
1241 dma_cookie_t cookie)
1242{
1243 struct rcar_dmac_desc *desc = chan->desc.running;
Laurent Pinchartccadee92014-07-16 23:15:48 +02001244 struct rcar_dmac_xfer_chunk *running = NULL;
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001245 struct rcar_dmac_xfer_chunk *chunk;
Laurent Pinchart55bd5822016-06-30 17:15:18 +02001246 enum dma_status status;
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001247 unsigned int residue = 0;
Laurent Pinchartccadee92014-07-16 23:15:48 +02001248 unsigned int dptr = 0;
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001249
1250 if (!desc)
1251 return 0;
1252
1253 /*
Laurent Pinchart55bd5822016-06-30 17:15:18 +02001254 * If the cookie corresponds to a descriptor that has been completed
1255 * there is no residue. The same check has already been performed by the
1256 * caller but without holding the channel lock, so the descriptor could
1257 * now be complete.
1258 */
1259 status = dma_cookie_status(&chan->chan, cookie, NULL);
1260 if (status == DMA_COMPLETE)
1261 return 0;
1262
1263 /*
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001264 * If the cookie doesn't correspond to the currently running transfer
1265 * then the descriptor hasn't been processed yet, and the residue is
1266 * equal to the full descriptor size.
1267 */
Laurent Pinchart55bd5822016-06-30 17:15:18 +02001268 if (cookie != desc->async_tx.cookie) {
1269 list_for_each_entry(desc, &chan->desc.pending, node) {
1270 if (cookie == desc->async_tx.cookie)
1271 return desc->size;
1272 }
1273 list_for_each_entry(desc, &chan->desc.active, node) {
1274 if (cookie == desc->async_tx.cookie)
1275 return desc->size;
1276 }
1277
1278 /*
1279 * No descriptor found for the cookie, there's thus no residue.
1280 * This shouldn't happen if the calling driver passes a correct
1281 * cookie value.
1282 */
1283 WARN(1, "No descriptor for cookie!");
1284 return 0;
1285 }
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001286
Laurent Pinchartccadee92014-07-16 23:15:48 +02001287 /*
1288 * In descriptor mode the descriptor running pointer is not maintained
1289 * by the interrupt handler, find the running descriptor from the
1290 * descriptor pointer field in the CHCRB register. In non-descriptor
1291 * mode just use the running descriptor pointer.
1292 */
Laurent Pinchart1ed13152014-07-19 00:05:14 +02001293 if (desc->hwdescs.use) {
Laurent Pinchartccadee92014-07-16 23:15:48 +02001294 dptr = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
1295 RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
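		/*
		 * DPTR seems to point to the next stage to be executed; step
		 * back one entry, wrapping from 0 to the last chunk, to find
		 * the chunk currently in flight.
		 */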
Kuninori Morimoto56b17702017-05-23 07:08:43 +00001296 if (dptr == 0)
1297 dptr = desc->nchunks;
1298 dptr--;
Laurent Pinchartccadee92014-07-16 23:15:48 +02001299 WARN_ON(dptr >= desc->nchunks);
1300 } else {
1301 running = desc->running;
1302 }
1303
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001304 /* Compute the size of all chunks still to be transferred. */
1305 list_for_each_entry_reverse(chunk, &desc->chunks, node) {
Laurent Pinchartccadee92014-07-16 23:15:48 +02001306 if (chunk == running || ++dptr == desc->nchunks)
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001307 break;
1308
1309 residue += chunk->size;
1310 }
1311
1312 /* Add the residue for the current chunk. */
1313 residue += rcar_dmac_chan_read(chan, RCAR_DMATCR) << desc->xfer_shift;
1314
1315 return residue;
1316}
1317
1318static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
1319 dma_cookie_t cookie,
1320 struct dma_tx_state *txstate)
1321{
1322 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1323 enum dma_status status;
1324 unsigned long flags;
1325 unsigned int residue;
1326
1327 status = dma_cookie_status(chan, cookie, txstate);
1328 if (status == DMA_COMPLETE || !txstate)
1329 return status;
1330
1331 spin_lock_irqsave(&rchan->lock, flags);
1332 residue = rcar_dmac_chan_get_residue(rchan, cookie);
1333 spin_unlock_irqrestore(&rchan->lock, flags);
1334
Muhammad Hamza Farooq3544d282016-06-30 17:15:15 +02001335 /* if there's no residue, the cookie is complete */
1336 if (!residue)
1337 return DMA_COMPLETE;
1338
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001339 dma_set_residue(txstate, residue);
1340
1341 return status;
1342}
1343
1344static void rcar_dmac_issue_pending(struct dma_chan *chan)
1345{
1346 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1347 unsigned long flags;
1348
1349 spin_lock_irqsave(&rchan->lock, flags);
1350
1351 if (list_empty(&rchan->desc.pending))
1352 goto done;
1353
1354 /* Append the pending list to the active list. */
1355 list_splice_tail_init(&rchan->desc.pending, &rchan->desc.active);
1356
1357 /*
1358 * If no transfer is running pick the first descriptor from the active
1359 * list and start the transfer.
1360 */
1361 if (!rchan->desc.running) {
1362 struct rcar_dmac_desc *desc;
1363
1364 desc = list_first_entry(&rchan->desc.active,
1365 struct rcar_dmac_desc, node);
1366 rchan->desc.running = desc;
1367
1368 rcar_dmac_chan_start_xfer(rchan);
1369 }
1370
1371done:
1372 spin_unlock_irqrestore(&rchan->lock, flags);
1373}
1374
Niklas Söderlund30c45002017-05-16 01:09:16 +02001375static void rcar_dmac_device_synchronize(struct dma_chan *chan)
1376{
1377 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1378
1379 synchronize_irq(rchan->irq);
1380}
1381
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001382/* -----------------------------------------------------------------------------
1383 * IRQ handling
1384 */
1385
Laurent Pinchartccadee92014-07-16 23:15:48 +02001386static irqreturn_t rcar_dmac_isr_desc_stage_end(struct rcar_dmac_chan *chan)
1387{
1388 struct rcar_dmac_desc *desc = chan->desc.running;
1389 unsigned int stage;
1390
1391 if (WARN_ON(!desc || !desc->cyclic)) {
1392 /*
1393 * This should never happen, there should always be a running
1394 * cyclic descriptor when a descriptor stage end interrupt is
1395 * triggered. Warn and return.
1396 */
1397 return IRQ_NONE;
1398 }
1399
1400 /* Program the interrupt pointer to the next stage. */
1401 stage = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
1402 RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
1403 rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(stage));
1404
1405 return IRQ_WAKE_THREAD;
1406}
1407
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001408static irqreturn_t rcar_dmac_isr_transfer_end(struct rcar_dmac_chan *chan)
1409{
1410 struct rcar_dmac_desc *desc = chan->desc.running;
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001411 irqreturn_t ret = IRQ_WAKE_THREAD;
1412
1413 if (WARN_ON_ONCE(!desc)) {
1414 /*
Laurent Pinchartccadee92014-07-16 23:15:48 +02001415 * This should never happen, there should always be a running
1416 * descriptor when a transfer end interrupt is triggered. Warn
1417 * and return.
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001418 */
1419 return IRQ_NONE;
1420 }
1421
1422 /*
Laurent Pinchartccadee92014-07-16 23:15:48 +02001423 * The transfer end interrupt isn't generated for each chunk when using
1424 * descriptor mode. Only update the running chunk pointer in
1425 * non-descriptor mode.
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001426 */
Laurent Pinchart1ed13152014-07-19 00:05:14 +02001427 if (!desc->hwdescs.use) {
Laurent Pinchartccadee92014-07-16 23:15:48 +02001428 /*
1429 * If we haven't completed the last transfer chunk simply move
1430 * to the next one. Only wake the IRQ thread if the transfer is
1431 * cyclic.
1432 */
1433 if (!list_is_last(&desc->running->node, &desc->chunks)) {
1434 desc->running = list_next_entry(desc->running, node);
1435 if (!desc->cyclic)
1436 ret = IRQ_HANDLED;
1437 goto done;
1438 }
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001439
Laurent Pinchartccadee92014-07-16 23:15:48 +02001440 /*
1441 * We've completed the last transfer chunk. If the transfer is
1442 * cyclic, move back to the first one.
1443 */
1444 if (desc->cyclic) {
1445 desc->running =
1446 list_first_entry(&desc->chunks,
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001447 struct rcar_dmac_xfer_chunk,
1448 node);
Laurent Pinchartccadee92014-07-16 23:15:48 +02001449 goto done;
1450 }
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001451 }
1452
1453 /* The descriptor is complete, move it to the done list. */
1454 list_move_tail(&desc->node, &chan->desc.done);
1455
1456 /* Queue the next descriptor, if any. */
1457 if (!list_empty(&chan->desc.active))
1458 chan->desc.running = list_first_entry(&chan->desc.active,
1459 struct rcar_dmac_desc,
1460 node);
1461 else
1462 chan->desc.running = NULL;
1463
1464done:
1465 if (chan->desc.running)
1466 rcar_dmac_chan_start_xfer(chan);
1467
1468 return ret;
1469}
1470
1471static irqreturn_t rcar_dmac_isr_channel(int irq, void *dev)
1472{
Laurent Pinchartccadee92014-07-16 23:15:48 +02001473 u32 mask = RCAR_DMACHCR_DSE | RCAR_DMACHCR_TE;
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001474 struct rcar_dmac_chan *chan = dev;
1475 irqreturn_t ret = IRQ_NONE;
1476 u32 chcr;
1477
1478 spin_lock(&chan->lock);
1479
1480 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
Laurent Pinchartccadee92014-07-16 23:15:48 +02001481 if (chcr & RCAR_DMACHCR_TE)
1482 mask |= RCAR_DMACHCR_DE;
1483 rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr & ~mask);
1484
1485 if (chcr & RCAR_DMACHCR_DSE)
1486 ret |= rcar_dmac_isr_desc_stage_end(chan);
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001487
1488 if (chcr & RCAR_DMACHCR_TE)
1489 ret |= rcar_dmac_isr_transfer_end(chan);
1490
1491 spin_unlock(&chan->lock);
1492
1493 return ret;
1494}
1495
1496static irqreturn_t rcar_dmac_isr_channel_thread(int irq, void *dev)
1497{
1498 struct rcar_dmac_chan *chan = dev;
1499 struct rcar_dmac_desc *desc;
Dave Jiang964b2fd2016-07-20 13:12:53 -07001500 struct dmaengine_desc_callback cb;
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001501
1502 spin_lock_irq(&chan->lock);
1503
1504 /* For cyclic transfers notify the user after every chunk. */
1505 if (chan->desc.running && chan->desc.running->cyclic) {
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001506 desc = chan->desc.running;
Dave Jiang964b2fd2016-07-20 13:12:53 -07001507 dmaengine_desc_get_callback(&desc->async_tx, &cb);
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001508
Dave Jiang964b2fd2016-07-20 13:12:53 -07001509 if (dmaengine_desc_callback_valid(&cb)) {
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001510 spin_unlock_irq(&chan->lock);
Dave Jiang964b2fd2016-07-20 13:12:53 -07001511 dmaengine_desc_callback_invoke(&cb, NULL);
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001512 spin_lock_irq(&chan->lock);
1513 }
1514 }
1515
1516 /*
1517 * Call the callback function for all descriptors on the done list and
1518 * move them to the ack wait list.
1519 */
1520 while (!list_empty(&chan->desc.done)) {
1521 desc = list_first_entry(&chan->desc.done, struct rcar_dmac_desc,
1522 node);
1523 dma_cookie_complete(&desc->async_tx);
1524 list_del(&desc->node);
1525
Dave Jiang964b2fd2016-07-20 13:12:53 -07001526 dmaengine_desc_get_callback(&desc->async_tx, &cb);
1527 if (dmaengine_desc_callback_valid(&cb)) {
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001528 spin_unlock_irq(&chan->lock);
1529 /*
 1530 * We own the only reference to this descriptor, so we can
1531 * safely dereference it without holding the channel
1532 * lock.
1533 */
Dave Jiang964b2fd2016-07-20 13:12:53 -07001534 dmaengine_desc_callback_invoke(&cb, NULL);
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001535 spin_lock_irq(&chan->lock);
1536 }
1537
1538 list_add_tail(&desc->node, &chan->desc.wait);
1539 }
1540
Laurent Pinchartccadee92014-07-16 23:15:48 +02001541 spin_unlock_irq(&chan->lock);
1542
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001543 /* Recycle all acked descriptors. */
1544 rcar_dmac_desc_recycle_acked(chan);
1545
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001546 return IRQ_HANDLED;
1547}
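/*
 * For reference, a minimal client-side sketch of a cyclic transfer whose
 * per-chunk completions end up in the threaded handler above. This only
 * illustrates the generic dmaengine API and is not part of this driver;
 * the channel name "rx", the callback and the fifo parameter are assumed
 * placeholders.
 */
static void example_period_done(void *param)
{
	/* Runs once per completed chunk, from the IRQ thread above. */
}

static int example_start_cyclic(struct device *dev, dma_addr_t fifo,
				dma_addr_t buf, size_t buf_len,
				size_t period_len)
{
	struct dma_slave_config cfg = {
		.direction = DMA_DEV_TO_MEM,
		.src_addr = fifo,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
	};
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	int ret;

	/* Request the channel named "rx" in the client's DT node. */
	chan = dma_request_chan(dev, "rx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret < 0)
		goto release;

	/* One callback per period (chunk), signalled by DMA_PREP_INTERRUPT. */
	tx = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
				       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!tx) {
		ret = -ENOMEM;
		goto release;
	}

	tx->callback = example_period_done;
	tx->callback_param = NULL;
	dmaengine_submit(tx);
	dma_async_issue_pending(chan);

	return 0;

release:
	dma_release_channel(chan);
	return ret;
}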
1548
1549static irqreturn_t rcar_dmac_isr_error(int irq, void *data)
1550{
1551 struct rcar_dmac *dmac = data;
1552
1553 if (!(rcar_dmac_read(dmac, RCAR_DMAOR) & RCAR_DMAOR_AE))
1554 return IRQ_NONE;
1555
1556 /*
1557 * An unrecoverable error occurred on an unknown channel. Halt the DMAC,
1558 * abort transfers on all channels, and reinitialize the DMAC.
1559 */
1560 rcar_dmac_stop(dmac);
1561 rcar_dmac_abort(dmac);
1562 rcar_dmac_init(dmac);
1563
1564 return IRQ_HANDLED;
1565}
1566
1567/* -----------------------------------------------------------------------------
1568 * OF xlate and channel filter
1569 */
1570
1571static bool rcar_dmac_chan_filter(struct dma_chan *chan, void *arg)
1572{
1573 struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
1574 struct of_phandle_args *dma_spec = arg;
1575
1576 /*
 1577 * FIXME: Using a filter on OF platforms is nonsensical. The OF xlate
 1578 * function knows which device it wants to allocate a channel from, and
 1579 * would be perfectly capable of selecting the channel it wants. Forcing
 1580 * it to call dma_request_channel() and iterate through all channels from
 1581 * all controllers is just pointless.
1582 */
1583 if (chan->device->device_config != rcar_dmac_device_config ||
1584 dma_spec->np != chan->device->dev->of_node)
1585 return false;
1586
1587 return !test_and_set_bit(dma_spec->args[0], dmac->modules);
1588}
1589
1590static struct dma_chan *rcar_dmac_of_xlate(struct of_phandle_args *dma_spec,
1591 struct of_dma *ofdma)
1592{
1593 struct rcar_dmac_chan *rchan;
1594 struct dma_chan *chan;
1595 dma_cap_mask_t mask;
1596
1597 if (dma_spec->args_count != 1)
1598 return NULL;
1599
1600 /* Only slave DMA channels can be allocated via DT */
1601 dma_cap_zero(mask);
1602 dma_cap_set(DMA_SLAVE, mask);
1603
1604 chan = dma_request_channel(mask, rcar_dmac_chan_filter, dma_spec);
1605 if (!chan)
1606 return NULL;
1607
1608 rchan = to_rcar_dmac_chan(chan);
1609 rchan->mid_rid = dma_spec->args[0];
1610
1611 return chan;
1612}
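/*
 * rcar_dmac_of_xlate() above consumes a single DT cell carrying the slave's
 * MID/RID value. A client node would therefore reference channels roughly as
 * follows (controller label and MID/RID values are illustrative only):
 *
 *	dmas = <&dmac0 0x21>, <&dmac0 0x22>;
 *	dma-names = "tx", "rx";
 */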
1613
1614/* -----------------------------------------------------------------------------
1615 * Power management
1616 */
1617
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001618#ifdef CONFIG_PM
1619static int rcar_dmac_runtime_suspend(struct device *dev)
1620{
1621 return 0;
1622}
1623
1624static int rcar_dmac_runtime_resume(struct device *dev)
1625{
1626 struct rcar_dmac *dmac = dev_get_drvdata(dev);
1627
1628 return rcar_dmac_init(dmac);
1629}
1630#endif
1631
1632static const struct dev_pm_ops rcar_dmac_pm = {
Geert Uytterhoeven1131b0a2018-01-17 10:38:28 +01001633 /*
1634 * TODO for system sleep/resume:
1635 * - Wait for the current transfer to complete and stop the device,
1636 * - Resume transfers, if any.
1637 */
1638 SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
1639 pm_runtime_force_resume)
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001640 SET_RUNTIME_PM_OPS(rcar_dmac_runtime_suspend, rcar_dmac_runtime_resume,
1641 NULL)
1642};
1643
1644/* -----------------------------------------------------------------------------
1645 * Probe and remove
1646 */
1647
1648static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
1649 struct rcar_dmac_chan *rchan,
1650 unsigned int index)
1651{
1652 struct platform_device *pdev = to_platform_device(dmac->dev);
1653 struct dma_chan *chan = &rchan->chan;
1654 char pdev_irqname[5];
1655 char *irqname;
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001656 int ret;
1657
1658 rchan->index = index;
1659 rchan->iomem = dmac->iomem + RCAR_DMAC_CHAN_OFFSET(index);
1660 rchan->mid_rid = -EINVAL;
1661
1662 spin_lock_init(&rchan->lock);
1663
Laurent Pinchartf7638c92015-01-27 15:58:53 +02001664 INIT_LIST_HEAD(&rchan->desc.free);
1665 INIT_LIST_HEAD(&rchan->desc.pending);
1666 INIT_LIST_HEAD(&rchan->desc.active);
1667 INIT_LIST_HEAD(&rchan->desc.done);
1668 INIT_LIST_HEAD(&rchan->desc.wait);
1669
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001670 /* Request the channel interrupt. */
1671 sprintf(pdev_irqname, "ch%u", index);
Niklas Söderlund427d5ec2017-05-16 01:09:15 +02001672 rchan->irq = platform_get_irq_byname(pdev, pdev_irqname);
1673 if (rchan->irq < 0) {
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001674 dev_err(dmac->dev, "no IRQ specified for channel %u\n", index);
 1675 return rchan->irq;
1676 }
1677
1678 irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
1679 dev_name(dmac->dev), index);
1680 if (!irqname)
1681 return -ENOMEM;
1682
Kuninori Morimoto5e857042017-08-21 06:31:57 +00001683 /*
1684 * Initialize the DMA engine channel and add it to the DMA engine
1685 * channels list.
1686 */
1687 chan->device = &dmac->engine;
1688 dma_cookie_init(chan);
1689
1690 list_add_tail(&chan->device_node, &dmac->engine.channels);
1691
Niklas Söderlund427d5ec2017-05-16 01:09:15 +02001692 ret = devm_request_threaded_irq(dmac->dev, rchan->irq,
1693 rcar_dmac_isr_channel,
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001694 rcar_dmac_isr_channel_thread, 0,
1695 irqname, rchan);
1696 if (ret) {
Niklas Söderlund427d5ec2017-05-16 01:09:15 +02001697 dev_err(dmac->dev, "failed to request IRQ %u (%d)\n",
1698 rchan->irq, ret);
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001699 return ret;
1700 }
1701
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001702 return 0;
1703}
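/*
 * The channel interrupt is looked up by the "ch<n>" name built in
 * rcar_dmac_chan_probe() above, alongside the "error" interrupt requested in
 * rcar_dmac_probe(). A DMAC node would therefore name its interrupts along
 * these lines (illustrative only):
 *
 *	interrupt-names = "error", "ch0", "ch1", "ch2";
 */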
1704
1705static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac)
1706{
1707 struct device_node *np = dev->of_node;
1708 int ret;
1709
1710 ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels);
1711 if (ret < 0) {
1712 dev_err(dev, "unable to read dma-channels property\n");
1713 return ret;
1714 }
1715
1716 if (dmac->n_channels <= 0 || dmac->n_channels >= 100) {
1717 dev_err(dev, "invalid number of channels %u\n",
1718 dmac->n_channels);
1719 return -EINVAL;
1720 }
1721
1722 return 0;
1723}
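/*
 * The channel count parsed above comes from the standard "dma-channels"
 * property of the DMAC node, e.g. (value illustrative only):
 *
 *	dma-channels = <15>;
 */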
1724
1725static int rcar_dmac_probe(struct platform_device *pdev)
1726{
1727 const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_1_BYTE |
1728 DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES |
1729 DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES |
1730 DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES;
Laurent Pinchartbe6893e2015-01-27 19:04:10 +02001731 unsigned int channels_offset = 0;
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001732 struct dma_device *engine;
1733 struct rcar_dmac *dmac;
1734 struct resource *mem;
1735 unsigned int i;
1736 char *irqname;
1737 int irq;
1738 int ret;
1739
1740 dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
1741 if (!dmac)
1742 return -ENOMEM;
1743
1744 dmac->dev = &pdev->dev;
1745 platform_set_drvdata(pdev, dmac);
Geert Uytterhoevendc312342017-02-13 12:00:26 +01001746 dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001747
1748 ret = rcar_dmac_parse_of(&pdev->dev, dmac);
1749 if (ret < 0)
1750 return ret;
1751
Laurent Pinchartbe6893e2015-01-27 19:04:10 +02001752 /*
 1753 * A still unconfirmed hardware bug prevents the IPMMU microTLB 0 from
 1754 * being flushed correctly, resulting in memory corruption. DMAC 0 channel 0
1755 * is connected to microTLB 0 on currently supported platforms, so we
1756 * can't use it with the IPMMU. As the IOMMU API operates at the device
1757 * level we can't disable it selectively, so ignore channel 0 for now if
1758 * the device is part of an IOMMU group.
1759 */
1760 if (pdev->dev.iommu_group) {
1761 dmac->n_channels--;
1762 channels_offset = 1;
1763 }
1764
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001765 dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
1766 sizeof(*dmac->channels), GFP_KERNEL);
1767 if (!dmac->channels)
1768 return -ENOMEM;
1769
1770 /* Request resources. */
1771 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1772 dmac->iomem = devm_ioremap_resource(&pdev->dev, mem);
1773 if (IS_ERR(dmac->iomem))
1774 return PTR_ERR(dmac->iomem);
1775
1776 irq = platform_get_irq_byname(pdev, "error");
1777 if (irq < 0) {
1778 dev_err(&pdev->dev, "no error IRQ specified\n");
 1779 return irq;
1780 }
1781
1782 irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:error",
1783 dev_name(dmac->dev));
1784 if (!irqname)
1785 return -ENOMEM;
1786
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001787 /* Enable runtime PM and initialize the device. */
1788 pm_runtime_enable(&pdev->dev);
1789 ret = pm_runtime_get_sync(&pdev->dev);
1790 if (ret < 0) {
1791 dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
1792 return ret;
1793 }
1794
1795 ret = rcar_dmac_init(dmac);
1796 pm_runtime_put(&pdev->dev);
1797
1798 if (ret) {
1799 dev_err(&pdev->dev, "failed to reset device\n");
1800 goto error;
1801 }
1802
Kuninori Morimoto5e857042017-08-21 06:31:57 +00001803 /* Initialize engine */
1804 engine = &dmac->engine;
1805
1806 dma_cap_set(DMA_MEMCPY, engine->cap_mask);
1807 dma_cap_set(DMA_SLAVE, engine->cap_mask);
1808
1809 engine->dev = &pdev->dev;
1810 engine->copy_align = ilog2(RCAR_DMAC_MEMCPY_XFER_SIZE);
1811
1812 engine->src_addr_widths = widths;
1813 engine->dst_addr_widths = widths;
1814 engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
1815 engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1816
1817 engine->device_alloc_chan_resources = rcar_dmac_alloc_chan_resources;
1818 engine->device_free_chan_resources = rcar_dmac_free_chan_resources;
1819 engine->device_prep_dma_memcpy = rcar_dmac_prep_dma_memcpy;
1820 engine->device_prep_slave_sg = rcar_dmac_prep_slave_sg;
1821 engine->device_prep_dma_cyclic = rcar_dmac_prep_dma_cyclic;
1822 engine->device_config = rcar_dmac_device_config;
1823 engine->device_terminate_all = rcar_dmac_chan_terminate_all;
1824 engine->device_tx_status = rcar_dmac_tx_status;
1825 engine->device_issue_pending = rcar_dmac_issue_pending;
1826 engine->device_synchronize = rcar_dmac_device_synchronize;
1827
1828 INIT_LIST_HEAD(&engine->channels);
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001829
1830 for (i = 0; i < dmac->n_channels; ++i) {
Laurent Pinchartbe6893e2015-01-27 19:04:10 +02001831 ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i],
1832 i + channels_offset);
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001833 if (ret < 0)
1834 goto error;
1835 }
1836
Kuninori Morimoto5e857042017-08-21 06:31:57 +00001837 ret = devm_request_irq(&pdev->dev, irq, rcar_dmac_isr_error, 0,
1838 irqname, dmac);
1839 if (ret) {
1840 dev_err(&pdev->dev, "failed to request IRQ %u (%d)\n",
1841 irq, ret);
 1842 goto error;
1843 }
1844
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001845 /* Register the DMAC as a DMA provider for DT. */
1846 ret = of_dma_controller_register(pdev->dev.of_node, rcar_dmac_of_xlate,
1847 NULL);
1848 if (ret < 0)
1849 goto error;
1850
1851 /*
1852 * Register the DMA engine device.
1853 *
1854 * Default transfer size of 32 bytes requires 32-byte alignment.
1855 */
Laurent Pinchart87244fe2014-07-09 00:42:19 +02001856 ret = dma_async_device_register(engine);
1857 if (ret < 0)
1858 goto error;
1859
1860 return 0;
1861
1862error:
1863 of_dma_controller_free(pdev->dev.of_node);
1864 pm_runtime_disable(&pdev->dev);
1865 return ret;
1866}
1867
1868static int rcar_dmac_remove(struct platform_device *pdev)
1869{
1870 struct rcar_dmac *dmac = platform_get_drvdata(pdev);
1871
1872 of_dma_controller_free(pdev->dev.of_node);
1873 dma_async_device_unregister(&dmac->engine);
1874
1875 pm_runtime_disable(&pdev->dev);
1876
1877 return 0;
1878}
1879
1880static void rcar_dmac_shutdown(struct platform_device *pdev)
1881{
1882 struct rcar_dmac *dmac = platform_get_drvdata(pdev);
1883
1884 rcar_dmac_stop(dmac);
1885}
1886
1887static const struct of_device_id rcar_dmac_of_ids[] = {
1888 { .compatible = "renesas,rcar-dmac", },
1889 { /* Sentinel */ }
1890};
1891MODULE_DEVICE_TABLE(of, rcar_dmac_of_ids);
1892
1893static struct platform_driver rcar_dmac_driver = {
1894 .driver = {
1895 .pm = &rcar_dmac_pm,
1896 .name = "rcar-dmac",
1897 .of_match_table = rcar_dmac_of_ids,
1898 },
1899 .probe = rcar_dmac_probe,
1900 .remove = rcar_dmac_remove,
1901 .shutdown = rcar_dmac_shutdown,
1902};
1903
1904module_platform_driver(rcar_dmac_driver);
1905
1906MODULE_DESCRIPTION("R-Car Gen2 DMA Controller Driver");
1907MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
1908MODULE_LICENSE("GPL v2");