// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMA driver for Xilinx Video DMA Engine
 *
 * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
 *
 * Based on the Freescale DMA driver.
 *
 * Description:
 * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
 * core that provides high-bandwidth direct memory access between memory
 * and AXI4-Stream type video target peripherals. The core provides efficient
 * two dimensional DMA operations with independent asynchronous read (S2MM)
 * and write (MM2S) channel operation. It can be configured to have either
 * one channel or two channels. If configured as two channels, one is to
 * transmit to the video device (MM2S) and another is to receive from the
 * video device (S2MM). Initialization, status, interrupt and management
 * registers are accessed through an AXI4-Lite slave interface.
 *
 * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that
 * provides high-bandwidth one dimensional direct memory access between memory
 * and AXI4-Stream target peripherals. It supports one receive and one
 * transmit channel, both of them optional at synthesis time.
 *
 * The AXI CDMA is a soft IP which provides high-bandwidth Direct Memory
 * Access (DMA) between a memory-mapped source address and a memory-mapped
 * destination address.
 */

#include <linux/bitops.h>
#include <linux/dmapool.h>
#include <linux/dma/xilinx_dma.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "../dmaengine.h"

/* Register/Descriptor Offsets */
#define XILINX_DMA_MM2S_CTRL_OFFSET		0x0000
#define XILINX_DMA_S2MM_CTRL_OFFSET		0x0030
#define XILINX_VDMA_MM2S_DESC_OFFSET		0x0050
#define XILINX_VDMA_S2MM_DESC_OFFSET		0x00a0

/* Control Registers */
#define XILINX_DMA_REG_DMACR			0x0000
#define XILINX_DMA_DMACR_DELAY_MAX		0xff
#define XILINX_DMA_DMACR_DELAY_SHIFT		24
#define XILINX_DMA_DMACR_FRAME_COUNT_MAX	0xff
#define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT	16
#define XILINX_DMA_DMACR_ERR_IRQ		BIT(14)
#define XILINX_DMA_DMACR_DLY_CNT_IRQ		BIT(13)
#define XILINX_DMA_DMACR_FRM_CNT_IRQ		BIT(12)
#define XILINX_DMA_DMACR_MASTER_SHIFT		8
#define XILINX_DMA_DMACR_FSYNCSRC_SHIFT		5
#define XILINX_DMA_DMACR_FRAMECNT_EN		BIT(4)
#define XILINX_DMA_DMACR_GENLOCK_EN		BIT(3)
#define XILINX_DMA_DMACR_RESET			BIT(2)
#define XILINX_DMA_DMACR_CIRC_EN		BIT(1)
#define XILINX_DMA_DMACR_RUNSTOP		BIT(0)
#define XILINX_DMA_DMACR_FSYNCSRC_MASK		GENMASK(6, 5)

#define XILINX_DMA_REG_DMASR			0x0004
#define XILINX_DMA_DMASR_EOL_LATE_ERR		BIT(15)
#define XILINX_DMA_DMASR_ERR_IRQ		BIT(14)
#define XILINX_DMA_DMASR_DLY_CNT_IRQ		BIT(13)
#define XILINX_DMA_DMASR_FRM_CNT_IRQ		BIT(12)
#define XILINX_DMA_DMASR_SOF_LATE_ERR		BIT(11)
#define XILINX_DMA_DMASR_SG_DEC_ERR		BIT(10)
#define XILINX_DMA_DMASR_SG_SLV_ERR		BIT(9)
#define XILINX_DMA_DMASR_EOF_EARLY_ERR		BIT(8)
#define XILINX_DMA_DMASR_SOF_EARLY_ERR		BIT(7)
#define XILINX_DMA_DMASR_DMA_DEC_ERR		BIT(6)
#define XILINX_DMA_DMASR_DMA_SLAVE_ERR		BIT(5)
#define XILINX_DMA_DMASR_DMA_INT_ERR		BIT(4)
#define XILINX_DMA_DMASR_SG_MASK		BIT(3)
#define XILINX_DMA_DMASR_IDLE			BIT(1)
#define XILINX_DMA_DMASR_HALTED			BIT(0)
#define XILINX_DMA_DMASR_DELAY_MASK		GENMASK(31, 24)
#define XILINX_DMA_DMASR_FRAME_COUNT_MASK	GENMASK(23, 16)

#define XILINX_DMA_REG_CURDESC			0x0008
#define XILINX_DMA_REG_TAILDESC			0x0010
#define XILINX_DMA_REG_REG_INDEX		0x0014
#define XILINX_DMA_REG_FRMSTORE		0x0018
#define XILINX_DMA_REG_THRESHOLD		0x001c
#define XILINX_DMA_REG_FRMPTR_STS		0x0024
#define XILINX_DMA_REG_PARK_PTR		0x0028
#define XILINX_DMA_PARK_PTR_WR_REF_SHIFT	8
#define XILINX_DMA_PARK_PTR_WR_REF_MASK	GENMASK(12, 8)
#define XILINX_DMA_PARK_PTR_RD_REF_SHIFT	0
#define XILINX_DMA_PARK_PTR_RD_REF_MASK	GENMASK(4, 0)
#define XILINX_DMA_REG_VDMA_VERSION		0x002c

/* Register Direct Mode Registers */
#define XILINX_DMA_REG_VSIZE			0x0000
#define XILINX_DMA_REG_HSIZE			0x0004

#define XILINX_DMA_REG_FRMDLY_STRIDE		0x0008
#define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT	24
#define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT	0

#define XILINX_VDMA_REG_START_ADDRESS(n)	(0x000c + 4 * (n))
#define XILINX_VDMA_REG_START_ADDRESS_64(n)	(0x000c + 8 * (n))

#define XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP	0x00ec
#define XILINX_VDMA_ENABLE_VERTICAL_FLIP	BIT(0)

/* HW specific definitions */
#define XILINX_DMA_MAX_CHANS_PER_DEVICE	0x20

#define XILINX_DMA_DMAXR_ALL_IRQ_MASK	\
		(XILINX_DMA_DMASR_FRM_CNT_IRQ | \
		 XILINX_DMA_DMASR_DLY_CNT_IRQ | \
		 XILINX_DMA_DMASR_ERR_IRQ)

#define XILINX_DMA_DMASR_ALL_ERR_MASK	\
		(XILINX_DMA_DMASR_EOL_LATE_ERR | \
		 XILINX_DMA_DMASR_SOF_LATE_ERR | \
		 XILINX_DMA_DMASR_SG_DEC_ERR | \
		 XILINX_DMA_DMASR_SG_SLV_ERR | \
		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_DMA_DEC_ERR | \
		 XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
		 XILINX_DMA_DMASR_DMA_INT_ERR)

/*
 * Recoverable errors are DMA Internal error, SOF Early, EOF Early
 * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
 * is enabled in the h/w system.
 */
#define XILINX_DMA_DMASR_ERR_RECOVER_MASK	\
		(XILINX_DMA_DMASR_SOF_LATE_ERR | \
		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_DMA_INT_ERR)

/* Axi VDMA Flush on Fsync bits */
#define XILINX_DMA_FLUSH_S2MM		3
#define XILINX_DMA_FLUSH_MM2S		2
#define XILINX_DMA_FLUSH_BOTH		1

/* Delay loop counter to prevent hardware failure */
#define XILINX_DMA_LOOP_COUNT		1000000

/* AXI DMA Specific Registers/Offsets */
#define XILINX_DMA_REG_SRCDSTADDR	0x18
#define XILINX_DMA_REG_BTT		0x28

/* AXI DMA Specific Masks/Bit fields */
#define XILINX_DMA_MAX_TRANS_LEN_MIN	8
#define XILINX_DMA_MAX_TRANS_LEN_MAX	23
#define XILINX_DMA_V2_MAX_TRANS_LEN_MAX	26
#define XILINX_DMA_CR_COALESCE_MAX	GENMASK(23, 16)
#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK	BIT(4)
#define XILINX_DMA_CR_COALESCE_SHIFT	16
#define XILINX_DMA_BD_SOP		BIT(27)
#define XILINX_DMA_BD_EOP		BIT(26)
#define XILINX_DMA_COALESCE_MAX		255
#define XILINX_DMA_NUM_DESCS		255
#define XILINX_DMA_NUM_APP_WORDS	5

/* Multi-Channel DMA Descriptor offsets */
#define XILINX_DMA_MCRX_CDESC(x)	(0x40 + (x-1) * 0x20)
#define XILINX_DMA_MCRX_TDESC(x)	(0x48 + (x-1) * 0x20)

/* Multi-Channel DMA Masks/Shifts */
#define XILINX_DMA_BD_HSIZE_MASK	GENMASK(15, 0)
#define XILINX_DMA_BD_STRIDE_MASK	GENMASK(15, 0)
#define XILINX_DMA_BD_VSIZE_MASK	GENMASK(31, 19)
#define XILINX_DMA_BD_TDEST_MASK	GENMASK(4, 0)
#define XILINX_DMA_BD_STRIDE_SHIFT	0
#define XILINX_DMA_BD_VSIZE_SHIFT	19

/* AXI CDMA Specific Registers/Offsets */
#define XILINX_CDMA_REG_SRCADDR		0x18
#define XILINX_CDMA_REG_DSTADDR		0x20

/* AXI CDMA Specific Masks */
#define XILINX_CDMA_CR_SGMODE		BIT(3)

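/*
 * Combine a pair of 32-bit variables named <addr> and <addr>_msb into a
 * single 64-bit dma_addr_t value.
 */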
#define xilinx_prep_dma_addr_t(addr)	\
	((dma_addr_t)((u64)addr##_##msb << 32 | (addr)))
/**
 * struct xilinx_vdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @pad1: Reserved @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @vsize: Vertical Size @0x10
 * @hsize: Horizontal Size @0x14
 * @stride: Number of bytes between the first
 *	    pixels of each horizontal line @0x18
 */
struct xilinx_vdma_desc_hw {
	u32 next_desc;
	u32 pad1;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 vsize;
	u32 hsize;
	u32 stride;
} __aligned(64);

/**
 * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @mcdma_control: Control field for mcdma @0x10
 * @vsize_stride: Vsize and Stride field for mcdma @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 * @app: APP Fields @0x20 - 0x30
 */
struct xilinx_axidma_desc_hw {
	u32 next_desc;
	u32 next_desc_msb;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 mcdma_control;
	u32 vsize_stride;
	u32 control;
	u32 status;
	u32 app[XILINX_DMA_NUM_APP_WORDS];
} __aligned(64);

/**
 * struct xilinx_cdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: Next Descriptor Pointer MSB @0x04
 * @src_addr: Source address @0x08
 * @src_addr_msb: Source address MSB @0x0C
 * @dest_addr: Destination address @0x10
 * @dest_addr_msb: Destination address MSB @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 */
struct xilinx_cdma_desc_hw {
	u32 next_desc;
	u32 next_desc_msb;
	u32 src_addr;
	u32 src_addr_msb;
	u32 dest_addr;
	u32 dest_addr_msb;
	u32 control;
	u32 status;
} __aligned(64);

/**
 * struct xilinx_vdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_vdma_tx_segment {
	struct xilinx_vdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_axidma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_axidma_tx_segment {
	struct xilinx_axidma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_cdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_cdma_tx_segment {
	struct xilinx_cdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_dma_tx_descriptor - Per Transaction structure
 * @async_tx: Async transaction descriptor
 * @segments: TX segments list
 * @node: Node in the channel descriptors list
 * @cyclic: Check for cyclic transfers.
 * @err: Whether the descriptor has an error.
 * @residue: Residue of the completed descriptor
 */
struct xilinx_dma_tx_descriptor {
	struct dma_async_tx_descriptor async_tx;
	struct list_head segments;
	struct list_head node;
	bool cyclic;
	bool err;
	u32 residue;
};

/**
 * struct xilinx_dma_chan - Driver specific DMA channel structure
 * @xdev: Driver specific device structure
 * @ctrl_offset: Control registers offset
 * @desc_offset: TX descriptor registers offset
 * @lock: Descriptor operation lock
 * @pending_list: Descriptors waiting
 * @active_list: Descriptors ready to submit
 * @done_list: Complete descriptors
 * @free_seg_list: Free descriptors
 * @common: DMA common channel
 * @desc_pool: Descriptors pool
 * @dev: The dma device
 * @irq: Channel IRQ
 * @id: Channel ID
 * @direction: Transfer direction
 * @num_frms: Number of frames
 * @has_sg: Support scatter transfers
 * @cyclic: Check for cyclic transfers.
 * @genlock: Support genlock mode
 * @err: Channel has errors
 * @idle: Check for channel idle
 * @tasklet: Cleanup work after irq
 * @config: Device configuration info
 * @flush_on_fsync: Flush on Frame sync
 * @desc_pendingcount: Descriptor pending count
 * @ext_addr: Indicates 64 bit addressing is supported by dma channel
 * @desc_submitcount: Descriptor h/w submitted count
 * @seg_v: Statically allocated segments base
 * @seg_p: Physical allocated segments base
 * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
 * @cyclic_seg_p: Physical allocated segments base for cyclic dma
 * @start_transfer: Differentiate b/w DMA IP's transfer
 * @stop_transfer: Differentiate b/w DMA IP's quiesce
 * @tdest: TDEST value for mcdma
 * @has_vflip: S2MM vertical flip
 */
struct xilinx_dma_chan {
	struct xilinx_dma_device *xdev;
	u32 ctrl_offset;
	u32 desc_offset;
	spinlock_t lock;
	struct list_head pending_list;
	struct list_head active_list;
	struct list_head done_list;
	struct list_head free_seg_list;
	struct dma_chan common;
	struct dma_pool *desc_pool;
	struct device *dev;
	int irq;
	int id;
	enum dma_transfer_direction direction;
	int num_frms;
	bool has_sg;
	bool cyclic;
	bool genlock;
	bool err;
	bool idle;
	struct tasklet_struct tasklet;
	struct xilinx_vdma_config config;
	bool flush_on_fsync;
	u32 desc_pendingcount;
	bool ext_addr;
	u32 desc_submitcount;
	struct xilinx_axidma_tx_segment *seg_v;
	dma_addr_t seg_p;
	struct xilinx_axidma_tx_segment *cyclic_seg_v;
	dma_addr_t cyclic_seg_p;
	void (*start_transfer)(struct xilinx_dma_chan *chan);
	int (*stop_transfer)(struct xilinx_dma_chan *chan);
	u16 tdest;
	bool has_vflip;
};

/**
 * enum xdma_ip_type - DMA IP type.
 *
 * @XDMA_TYPE_AXIDMA: Axi dma ip.
 * @XDMA_TYPE_CDMA: Axi cdma ip.
 * @XDMA_TYPE_VDMA: Axi vdma ip.
 *
 */
enum xdma_ip_type {
	XDMA_TYPE_AXIDMA = 0,
	XDMA_TYPE_CDMA,
	XDMA_TYPE_VDMA,
};

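/* DMA IP configuration: the IP flavour and its clock initialization callback */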
struct xilinx_dma_config {
	enum xdma_ip_type dmatype;
	int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
			struct clk **tx_clk, struct clk **txs_clk,
			struct clk **rx_clk, struct clk **rxs_clk);
};

/**
 * struct xilinx_dma_device - DMA device structure
 * @regs: I/O mapped base address
 * @dev: Device Structure
 * @common: DMA device structure
 * @chan: Driver specific DMA channel
 * @mcdma: Specifies whether Multi-Channel is present or not
 * @flush_on_fsync: Flush on frame sync
 * @ext_addr: Indicates 64 bit addressing is supported by dma device
 * @pdev: Platform device structure pointer
 * @dma_config: DMA config structure
 * @axi_clk: DMA Axi4-lite interface clock
 * @tx_clk: DMA mm2s clock
 * @txs_clk: DMA mm2s stream clock
 * @rx_clk: DMA s2mm clock
 * @rxs_clk: DMA s2mm stream clock
 * @nr_channels: Number of channels DMA device supports
 * @chan_id: DMA channel identifier
 * @max_buffer_len: Max buffer length
 */
struct xilinx_dma_device {
	void __iomem *regs;
	struct device *dev;
	struct dma_device common;
	struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
	bool mcdma;
	u32 flush_on_fsync;
	bool ext_addr;
	struct platform_device *pdev;
	const struct xilinx_dma_config *dma_config;
	struct clk *axi_clk;
	struct clk *tx_clk;
	struct clk *txs_clk;
	struct clk *rx_clk;
	struct clk *rxs_clk;
	u32 nr_channels;
	u32 chan_id;
	u32 max_buffer_len;
};

/* Macros */
#define to_xilinx_chan(chan) \
	container_of(chan, struct xilinx_dma_chan, common)
#define to_dma_tx_descriptor(tx) \
	container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
#define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
	readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
			   cond, delay_us, timeout_us)

/* IO accessors */
static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
{
	return ioread32(chan->xdev->regs + reg);
}

static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
{
	iowrite32(value, chan->xdev->regs + reg);
}

static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
				   u32 value)
{
	dma_write(chan, chan->desc_offset + reg, value);
}

static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
{
	return dma_read(chan, chan->ctrl_offset + reg);
}

static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
				  u32 value)
{
	dma_write(chan, chan->ctrl_offset + reg, value);
}

static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
				u32 clr)
{
	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
}

static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
				u32 set)
{
	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
}

/**
 * vdma_desc_write_64 - 64-bit descriptor write
 * @chan: Driver specific VDMA channel
 * @reg: Register to write
 * @value_lsb: lower address of the descriptor.
 * @value_msb: upper address of the descriptor.
 *
 * Since the vdma driver is trying to write to a register offset which is not
 * a multiple of 64 bits (ex: 0x5c), we write two separate 32-bit values
 * instead of a single 64-bit register write.
 */
static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
				      u32 value_lsb, u32 value_msb)
{
	/* Write the lsb 32 bits */
	writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);

	/* Write the msb 32 bits */
	writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
}

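/* Write a 64-bit value to a control register as a low word then a high word */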
static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
{
	lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg);
}

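/*
 * Program a DMA address register: use a 64-bit write when the channel has
 * extended addressing enabled, otherwise a single 32-bit control write.
 */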
static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg,
				dma_addr_t addr)
{
	if (chan->ext_addr)
		dma_writeq(chan, reg, addr);
	else
		dma_ctrl_write(chan, reg, addr);
}

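/*
 * Fill the buffer address (and its MSB word when 64-bit addressing is
 * enabled) of an AXI DMA hardware descriptor, at offset sg_used + period_len
 * from buf_addr.
 */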
static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
				     struct xilinx_axidma_desc_hw *hw,
				     dma_addr_t buf_addr, size_t sg_used,
				     size_t period_len)
{
	if (chan->ext_addr) {
		hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len);
		hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used +
						 period_len);
	} else {
		hw->buf_addr = buf_addr + sg_used + period_len;
	}
}

/* -----------------------------------------------------------------------------
 * Descriptors and segments alloc and free
 */

/**
 * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_vdma_tx_segment *
xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_vdma_tx_segment *segment;
	dma_addr_t phys;

	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	segment->phys = phys;

	return segment;
}

/**
 * xilinx_cdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_cdma_tx_segment *
xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_cdma_tx_segment *segment;
	dma_addr_t phys;

	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	segment->phys = phys;

	return segment;
}

/**
 * xilinx_axidma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_axidma_tx_segment *
xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_axidma_tx_segment *segment = NULL;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (!list_empty(&chan->free_seg_list)) {
		segment = list_first_entry(&chan->free_seg_list,
					   struct xilinx_axidma_tx_segment,
					   node);
		list_del(&segment->node);
	}
	spin_unlock_irqrestore(&chan->lock, flags);

	if (!segment)
		dev_dbg(chan->dev, "Could not find free tx segment\n");

	return segment;
}

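/*
 * Zero a hardware descriptor while preserving its next-descriptor pointers,
 * which keep the pre-allocated descriptor ring linked together.
 */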
static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw)
{
	u32 next_desc = hw->next_desc;
	u32 next_desc_msb = hw->next_desc_msb;

	memset(hw, 0, sizeof(struct xilinx_axidma_desc_hw));

	hw->next_desc = next_desc;
	hw->next_desc_msb = next_desc_msb;
}

/**
 * xilinx_dma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
				       struct xilinx_axidma_tx_segment *segment)
{
	xilinx_dma_clean_hw_desc(&segment->hw);

	list_add_tail(&segment->node, &chan->free_seg_list);
}

/**
 * xilinx_cdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
					struct xilinx_cdma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}

/**
 * xilinx_vdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
					struct xilinx_vdma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}

/**
 * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated descriptor on success and NULL on failure.
 */
static struct xilinx_dma_tx_descriptor *
xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return NULL;

	INIT_LIST_HEAD(&desc->segments);

	return desc;
}

/**
 * xilinx_dma_free_tx_descriptor - Free transaction descriptor
 * @chan: Driver specific DMA channel
 * @desc: DMA transaction descriptor
 */
static void
xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
			      struct xilinx_dma_tx_descriptor *desc)
{
	struct xilinx_vdma_tx_segment *segment, *next;
	struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
	struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;

	if (!desc)
		return;

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		list_for_each_entry_safe(segment, next, &desc->segments, node) {
			list_del(&segment->node);
			xilinx_vdma_free_tx_segment(chan, segment);
		}
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		list_for_each_entry_safe(cdma_segment, cdma_next,
					 &desc->segments, node) {
			list_del(&cdma_segment->node);
			xilinx_cdma_free_tx_segment(chan, cdma_segment);
		}
	} else {
		list_for_each_entry_safe(axidma_segment, axidma_next,
					 &desc->segments, node) {
			list_del(&axidma_segment->node);
			xilinx_dma_free_tx_segment(chan, axidma_segment);
		}
	}

	kfree(desc);
}

/* Required functions */

/**
 * xilinx_dma_free_desc_list - Free descriptors list
 * @chan: Driver specific DMA channel
 * @list: List to parse and delete the descriptor
 */
static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
				      struct list_head *list)
{
	struct xilinx_dma_tx_descriptor *desc, *next;

	list_for_each_entry_safe(desc, next, list, node) {
		list_del(&desc->node);
		xilinx_dma_free_tx_descriptor(chan, desc);
	}
}

/**
 * xilinx_dma_free_descriptors - Free channel descriptors
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	xilinx_dma_free_desc_list(chan, &chan->pending_list);
	xilinx_dma_free_desc_list(chan, &chan->done_list);
	xilinx_dma_free_desc_list(chan, &chan->active_list);

	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_dma_free_chan_resources - Free channel resources
 * @dchan: DMA channel
 */
static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	unsigned long flags;

	dev_dbg(chan->dev, "Free all channel resources.\n");

	xilinx_dma_free_descriptors(chan);

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		spin_lock_irqsave(&chan->lock, flags);
		INIT_LIST_HEAD(&chan->free_seg_list);
		spin_unlock_irqrestore(&chan->lock, flags);

		/* Free memory that is allocated for BD */
		dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
				  XILINX_DMA_NUM_DESCS, chan->seg_v,
				  chan->seg_p);

		/* Free memory that is allocated for cyclic DMA mode */
		dma_free_coherent(chan->dev, sizeof(*chan->cyclic_seg_v),
				  chan->cyclic_seg_v, chan->cyclic_seg_p);
	}

	if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) {
		dma_pool_destroy(chan->desc_pool);
		chan->desc_pool = NULL;
	}
}

/**
 * xilinx_dma_get_residue - Compute residue for a given descriptor
 * @chan: Driver specific dma channel
 * @desc: dma transaction descriptor
 *
 * Return: The number of residue bytes for the descriptor.
 */
static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan,
				  struct xilinx_dma_tx_descriptor *desc)
{
	struct xilinx_cdma_tx_segment *cdma_seg;
	struct xilinx_axidma_tx_segment *axidma_seg;
	struct xilinx_cdma_desc_hw *cdma_hw;
	struct xilinx_axidma_desc_hw *axidma_hw;
	struct list_head *entry;
	u32 residue = 0;

	list_for_each(entry, &desc->segments) {
		if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
			cdma_seg = list_entry(entry,
					      struct xilinx_cdma_tx_segment,
					      node);
			cdma_hw = &cdma_seg->hw;
			residue += (cdma_hw->control - cdma_hw->status) &
				   chan->xdev->max_buffer_len;
		} else {
			axidma_seg = list_entry(entry,
						struct xilinx_axidma_tx_segment,
						node);
			axidma_hw = &axidma_seg->hw;
			residue += (axidma_hw->control - axidma_hw->status) &
				   chan->xdev->max_buffer_len;
		}
	}

	return residue;
}

/**
 * xilinx_dma_chan_handle_cyclic - Cyclic dma callback
 * @chan: Driver specific dma channel
 * @desc: dma transaction descriptor
 * @flags: flags for spin lock
 */
static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
					  struct xilinx_dma_tx_descriptor *desc,
					  unsigned long *flags)
{
	dma_async_tx_callback callback;
	void *callback_param;

	callback = desc->async_tx.callback;
	callback_param = desc->async_tx.callback_param;
	if (callback) {
		spin_unlock_irqrestore(&chan->lock, *flags);
		callback(callback_param);
		spin_lock_irqsave(&chan->lock, *flags);
	}
}

/**
 * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc, *next;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	list_for_each_entry_safe(desc, next, &chan->done_list, node) {
		struct dmaengine_result result;

		if (desc->cyclic) {
			xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
			break;
		}

		/* Remove from the list of running transactions */
		list_del(&desc->node);

		if (unlikely(desc->err)) {
			if (chan->direction == DMA_DEV_TO_MEM)
				result.result = DMA_TRANS_READ_FAILED;
			else
				result.result = DMA_TRANS_WRITE_FAILED;
		} else {
			result.result = DMA_TRANS_NOERROR;
		}

		result.residue = desc->residue;

		/* Run the link descriptor callback function */
		spin_unlock_irqrestore(&chan->lock, flags);
		dmaengine_desc_get_callback_invoke(&desc->async_tx, &result);
		spin_lock_irqsave(&chan->lock, flags);

		/* Run any dependencies, then free the descriptor */
		dma_run_dependencies(&desc->async_tx);
		xilinx_dma_free_tx_descriptor(chan, desc);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_dma_do_tasklet - Schedule completion tasklet
 * @data: Pointer to the Xilinx DMA channel structure
 */
static void xilinx_dma_do_tasklet(unsigned long data)
{
	struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data;

	xilinx_dma_chan_desc_cleanup(chan);
}

/**
 * xilinx_dma_alloc_chan_resources - Allocate channel resources
 * @dchan: DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	int i;

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 0;

	/*
	 * We need the descriptor to be aligned to 64 bytes
	 * to meet the Xilinx VDMA specification requirement.
	 */
	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		/* Allocate the buffer descriptors. */
		chan->seg_v = dma_alloc_coherent(chan->dev,
						 sizeof(*chan->seg_v) * XILINX_DMA_NUM_DESCS,
						 &chan->seg_p, GFP_KERNEL);
		if (!chan->seg_v) {
			dev_err(chan->dev,
				"unable to allocate channel %d descriptors\n",
				chan->id);
			return -ENOMEM;
		}
		/*
		 * For cyclic DMA mode we need to program the tail Descriptor
		 * register with a value which is not a part of the BD chain
		 * so allocating a desc segment during channel allocation for
		 * programming tail descriptor.
		 */
		chan->cyclic_seg_v = dma_alloc_coherent(chan->dev,
							sizeof(*chan->cyclic_seg_v),
							&chan->cyclic_seg_p,
							GFP_KERNEL);
		if (!chan->cyclic_seg_v) {
			dev_err(chan->dev,
				"unable to allocate desc segment for cyclic DMA\n");
			dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
					  XILINX_DMA_NUM_DESCS, chan->seg_v,
					  chan->seg_p);
			return -ENOMEM;
		}
		chan->cyclic_seg_v->phys = chan->cyclic_seg_p;

		for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
			chan->seg_v[i].hw.next_desc =
			lower_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
				((i + 1) % XILINX_DMA_NUM_DESCS));
			chan->seg_v[i].hw.next_desc_msb =
			upper_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
				((i + 1) % XILINX_DMA_NUM_DESCS));
			chan->seg_v[i].phys = chan->seg_p +
				sizeof(*chan->seg_v) * i;
			list_add_tail(&chan->seg_v[i].node,
				      &chan->free_seg_list);
		}
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
						  chan->dev,
						  sizeof(struct xilinx_cdma_tx_segment),
						  __alignof__(struct xilinx_cdma_tx_segment),
						  0);
	} else {
		chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
						  chan->dev,
						  sizeof(struct xilinx_vdma_tx_segment),
						  __alignof__(struct xilinx_vdma_tx_segment),
						  0);
	}

	if (!chan->desc_pool &&
	    (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA)) {
		dev_err(chan->dev,
			"unable to allocate channel %d descriptor pool\n",
			chan->id);
		return -ENOMEM;
	}

	dma_cookie_init(dchan);

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		/* For AXI DMA, resetting one channel will reset the
		 * other channel as well, so enable the interrupts here.
		 */
		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
			     XILINX_DMA_DMAXR_ALL_IRQ_MASK);
	}

	if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
			     XILINX_CDMA_CR_SGMODE);

	return 0;
}

/**
 * xilinx_dma_calc_copysize - Calculate the amount of data to copy
 * @chan: Driver specific DMA channel
 * @size: Total data that needs to be copied
 * @done: Amount of data that has been already copied
 *
 * Return: Amount of data that has to be copied
 */
static int xilinx_dma_calc_copysize(struct xilinx_dma_chan *chan,
				    int size, int done)
{
	size_t copy;

	copy = min_t(size_t, size - done,
		     chan->xdev->max_buffer_len);

	if ((copy + done < size) &&
	    chan->xdev->common.copy_align) {
		/*
		 * If this is not the last descriptor, make sure
		 * the next one will be properly aligned
		 */
		copy = rounddown(copy,
				 (1 << chan->xdev->common.copy_align));
	}
	return copy;
}

/**
 * xilinx_dma_tx_status - Get DMA transaction status
 * @dchan: DMA channel
 * @cookie: Transaction identifier
 * @txstate: Transaction state
 *
 * Return: DMA transaction status
 */
static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *txstate)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	enum dma_status ret;
	unsigned long flags;
	u32 residue = 0;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&chan->lock, flags);

	desc = list_last_entry(&chan->active_list,
			       struct xilinx_dma_tx_descriptor, node);
	/*
	 * VDMA and simple mode do not support residue reporting, so the
	 * residue field will always be 0.
	 */
	if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA)
		residue = xilinx_dma_get_residue(chan, desc);

	spin_unlock_irqrestore(&chan->lock, flags);

	dma_set_residue(txstate, residue);

	return ret;
}

/**
 * xilinx_dma_stop_transfer - Halt DMA channel
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
{
	u32 val;

	dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);

	/* Wait for the hardware to halt */
	return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
				       val & XILINX_DMA_DMASR_HALTED, 0,
				       XILINX_DMA_LOOP_COUNT);
}

/**
 * xilinx_cdma_stop_transfer - Wait for the current transfer to complete
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan)
{
	u32 val;

	return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
				       val & XILINX_DMA_DMASR_IDLE, 0,
				       XILINX_DMA_LOOP_COUNT);
}

/**
 * xilinx_dma_start - Start DMA channel
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_start(struct xilinx_dma_chan *chan)
{
	int err;
	u32 val;

	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);

	/* Wait for the hardware to start */
	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
				      !(val & XILINX_DMA_DMASR_HALTED), 0,
				      XILINX_DMA_LOOP_COUNT);

	if (err) {
		dev_err(chan->dev, "Cannot start channel %p: %x\n",
			chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));

		chan->err = true;
	}
}

/**
 * xilinx_vdma_start_transfer - Starts VDMA transfer
 * @chan: Driver specific channel struct pointer
 */
static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
{
	struct xilinx_vdma_config *config = &chan->config;
	struct xilinx_dma_tx_descriptor *desc;
	u32 reg, j;
	struct xilinx_vdma_tx_segment *segment, *last = NULL;
	int i = 0;

	/* This function was invoked with lock held */
	if (chan->err)
		return;

	if (!chan->idle)
		return;

	if (list_empty(&chan->pending_list))
		return;

	desc = list_first_entry(&chan->pending_list,
				struct xilinx_dma_tx_descriptor, node);

	/* Configure the hardware using info in the config structure */
	if (chan->has_vflip) {
		reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP);
		reg &= ~XILINX_VDMA_ENABLE_VERTICAL_FLIP;
		reg |= config->vflip_en;
		dma_write(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP,
			  reg);
	}

	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);

	if (config->frm_cnt_en)
		reg |= XILINX_DMA_DMACR_FRAMECNT_EN;
	else
		reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;

	/* If not parking, enable circular mode */
	if (config->park)
		reg &= ~XILINX_DMA_DMACR_CIRC_EN;
	else
		reg |= XILINX_DMA_DMACR_CIRC_EN;

	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);

	j = chan->desc_submitcount;
	reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
	if (chan->direction == DMA_MEM_TO_DEV) {
		reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
		reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
	} else {
		reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
		reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
	}
	dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);

	/* Start the hardware */
	xilinx_dma_start(chan);

	if (chan->err)
		return;

	/* Start the transfer */
	if (chan->desc_submitcount < chan->num_frms)
		i = chan->desc_submitcount;

	list_for_each_entry(segment, &desc->segments, node) {
		if (chan->ext_addr)
			vdma_desc_write_64(chan,
					   XILINX_VDMA_REG_START_ADDRESS_64(i++),
					   segment->hw.buf_addr,
					   segment->hw.buf_addr_msb);
		else
			vdma_desc_write(chan,
					XILINX_VDMA_REG_START_ADDRESS(i++),
					segment->hw.buf_addr);

		last = segment;
	}

Andrea Merellob8349172018-11-20 16:31:51 +01001221 if (!last)
1222 return;
1223
 1224	/* HW expects these parameters to be the same for one transaction */
1225 vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
1226 vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
1227 last->hw.stride);
1228 vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
1229
1230 chan->desc_submitcount++;
1231 chan->desc_pendingcount--;
1232 list_del(&desc->node);
1233 list_add_tail(&desc->node, &chan->active_list);
1234 if (chan->desc_submitcount == chan->num_frms)
1235 chan->desc_submitcount = 0;
1236
Kedareswara rao Appana21e02a32017-12-07 10:51:02 +05301237 chan->idle = false;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301238}
1239
1240/**
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05301241 * xilinx_cdma_start_transfer - Starts CDMA transfer
1242 * @chan: Driver specific channel struct pointer
1243 */
1244static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
1245{
1246 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1247 struct xilinx_cdma_tx_segment *tail_segment;
1248 u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR);
1249
1250 if (chan->err)
1251 return;
1252
Kedareswara rao Appana21e02a32017-12-07 10:51:02 +05301253 if (!chan->idle)
1254 return;
1255
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05301256 if (list_empty(&chan->pending_list))
1257 return;
1258
1259 head_desc = list_first_entry(&chan->pending_list,
1260 struct xilinx_dma_tx_descriptor, node);
1261 tail_desc = list_last_entry(&chan->pending_list,
1262 struct xilinx_dma_tx_descriptor, node);
1263 tail_segment = list_last_entry(&tail_desc->segments,
1264 struct xilinx_cdma_tx_segment, node);
1265
1266 if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
1267 ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX;
1268 ctrl_reg |= chan->desc_pendingcount <<
1269 XILINX_DMA_CR_COALESCE_SHIFT;
1270 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg);
1271 }
1272
1273 if (chan->has_sg) {
Kedareswara rao Appana48c62fb2018-01-03 12:12:09 +05301274 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
1275 XILINX_CDMA_CR_SGMODE);
1276
1277 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1278 XILINX_CDMA_CR_SGMODE);
1279
Kedareswara rao Appana9791e712016-06-07 19:21:16 +05301280 xilinx_write(chan, XILINX_DMA_REG_CURDESC,
1281 head_desc->async_tx.phys);
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05301282
1283 /* Update tail ptr register which will start the transfer */
Kedareswara rao Appana9791e712016-06-07 19:21:16 +05301284 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1285 tail_segment->phys);
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05301286 } else {
1287 /* In simple mode */
1288 struct xilinx_cdma_tx_segment *segment;
1289 struct xilinx_cdma_desc_hw *hw;
1290
1291 segment = list_first_entry(&head_desc->segments,
1292 struct xilinx_cdma_tx_segment,
1293 node);
1294
1295 hw = &segment->hw;
1296
Radhey Shyam Pandey0e03aca2018-09-29 11:18:00 -06001297 xilinx_write(chan, XILINX_CDMA_REG_SRCADDR,
1298 xilinx_prep_dma_addr_t(hw->src_addr));
1299 xilinx_write(chan, XILINX_CDMA_REG_DSTADDR,
1300 xilinx_prep_dma_addr_t(hw->dest_addr));
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05301301
1302 /* Start the transfer */
1303 dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
Andrea Merello616f0f82018-11-20 16:31:45 +01001304 hw->control & chan->xdev->max_buffer_len);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301305 }
1306
1307 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1308 chan->desc_pendingcount = 0;
Kedareswara rao Appana21e02a32017-12-07 10:51:02 +05301309 chan->idle = false;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301310}
1311
1312/**
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301313 * xilinx_dma_start_transfer - Starts DMA transfer
1314 * @chan: Driver specific channel struct pointer
1315 */
1316static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
1317{
1318 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
Kedareswara rao Appana23059402017-12-07 10:51:04 +05301319 struct xilinx_axidma_tx_segment *tail_segment;
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301320 u32 reg;
1321
1322 if (chan->err)
1323 return;
1324
1325 if (list_empty(&chan->pending_list))
1326 return;
1327
Kedareswara rao Appana21e02a32017-12-07 10:51:02 +05301328 if (!chan->idle)
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301329 return;
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301330
1331 head_desc = list_first_entry(&chan->pending_list,
1332 struct xilinx_dma_tx_descriptor, node);
1333 tail_desc = list_last_entry(&chan->pending_list,
1334 struct xilinx_dma_tx_descriptor, node);
1335 tail_segment = list_last_entry(&tail_desc->segments,
1336 struct xilinx_axidma_tx_segment, node);
1337
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301338 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1339
1340 if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
1341 reg &= ~XILINX_DMA_CR_COALESCE_MAX;
1342 reg |= chan->desc_pendingcount <<
1343 XILINX_DMA_CR_COALESCE_SHIFT;
1344 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1345 }
1346
Kedareswara rao Appana1a9e7a02016-06-24 10:51:23 +05301347 if (chan->has_sg && !chan->xdev->mcdma)
Kedareswara rao Appanaf0cba682016-06-07 19:21:15 +05301348 xilinx_write(chan, XILINX_DMA_REG_CURDESC,
1349 head_desc->async_tx.phys);
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301350
Kedareswara rao Appana1a9e7a02016-06-24 10:51:23 +05301351 if (chan->has_sg && chan->xdev->mcdma) {
1352 if (chan->direction == DMA_MEM_TO_DEV) {
1353 dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
1354 head_desc->async_tx.phys);
1355 } else {
1356 if (!chan->tdest) {
1357 dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
1358 head_desc->async_tx.phys);
1359 } else {
1360 dma_ctrl_write(chan,
1361 XILINX_DMA_MCRX_CDESC(chan->tdest),
1362 head_desc->async_tx.phys);
1363 }
1364 }
1365 }
1366
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301367 xilinx_dma_start(chan);
1368
1369 if (chan->err)
1370 return;
1371
1372 /* Start the transfer */
Kedareswara rao Appana1a9e7a02016-06-24 10:51:23 +05301373 if (chan->has_sg && !chan->xdev->mcdma) {
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05301374 if (chan->cyclic)
Kedareswara rao Appanaf0cba682016-06-07 19:21:15 +05301375 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1376 chan->cyclic_seg_v->phys);
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05301377 else
Kedareswara rao Appanaf0cba682016-06-07 19:21:15 +05301378 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1379 tail_segment->phys);
Kedareswara rao Appana1a9e7a02016-06-24 10:51:23 +05301380 } else if (chan->has_sg && chan->xdev->mcdma) {
1381 if (chan->direction == DMA_MEM_TO_DEV) {
1382 dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
1383 tail_segment->phys);
1384 } else {
1385 if (!chan->tdest) {
1386 dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
1387 tail_segment->phys);
1388 } else {
1389 dma_ctrl_write(chan,
1390 XILINX_DMA_MCRX_TDESC(chan->tdest),
1391 tail_segment->phys);
1392 }
1393 }
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301394 } else {
1395 struct xilinx_axidma_tx_segment *segment;
1396 struct xilinx_axidma_desc_hw *hw;
1397
1398 segment = list_first_entry(&head_desc->segments,
1399 struct xilinx_axidma_tx_segment,
1400 node);
1401 hw = &segment->hw;
1402
Kedareswara rao Appanaf0cba682016-06-07 19:21:15 +05301403 xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr);
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301404
1405 /* Start the transfer */
1406 dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
Andrea Merello616f0f82018-11-20 16:31:45 +01001407 hw->control & chan->xdev->max_buffer_len);
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301408 }
1409
1410 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1411 chan->desc_pendingcount = 0;
Kedareswara rao Appana21e02a32017-12-07 10:51:02 +05301412 chan->idle = false;
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301413}
1414
1415/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301416 * xilinx_dma_issue_pending - Issue pending transactions
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301417 * @dchan: DMA channel
1418 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301419static void xilinx_dma_issue_pending(struct dma_chan *dchan)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301420{
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301421 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
Kedareswara rao Appana26c5e362016-02-26 19:33:52 +05301422 unsigned long flags;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301423
Kedareswara rao Appana26c5e362016-02-26 19:33:52 +05301424 spin_lock_irqsave(&chan->lock, flags);
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301425 chan->start_transfer(chan);
Kedareswara rao Appana26c5e362016-02-26 19:33:52 +05301426 spin_unlock_irqrestore(&chan->lock, flags);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301427}
1428
1429/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301430 * xilinx_dma_complete_descriptor - Mark the active descriptors as complete
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301431 * @chan : xilinx DMA channel
1432 *
1433 * CONTEXT: hardirq
1434 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301435static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301436{
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301437 struct xilinx_dma_tx_descriptor *desc, *next;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301438
Kedareswara rao Appana26c5e362016-02-26 19:33:52 +05301439	/* This function is called with the channel lock held */
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05301440 if (list_empty(&chan->active_list))
Kedareswara rao Appana26c5e362016-02-26 19:33:52 +05301441 return;
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05301442
1443 list_for_each_entry_safe(desc, next, &chan->active_list, node) {
Nicholas Graumannd8bae212019-10-15 20:18:22 +05301444 if (chan->has_sg && chan->xdev->dma_config->dmatype !=
1445 XDMA_TYPE_VDMA)
1446 desc->residue = xilinx_dma_get_residue(chan, desc);
1447 else
1448 desc->residue = 0;
1449 desc->err = chan->err;
1450
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05301451 list_del(&desc->node);
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05301452 if (!desc->cyclic)
1453 dma_cookie_complete(&desc->async_tx);
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05301454 list_add_tail(&desc->node, &chan->done_list);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301455 }
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301456}
1457
1458/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301459 * xilinx_dma_reset - Reset DMA channel
1460 * @chan: Driver specific DMA channel
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301461 *
1462 * Return: '0' on success and failure value on error
1463 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301464static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301465{
Kedareswara rao Appana69490632016-03-03 23:02:42 +05301466 int err;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301467 u32 tmp;
1468
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301469 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301470
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301471 /* Wait for the hardware to finish reset */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301472 err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp,
1473 !(tmp & XILINX_DMA_DMACR_RESET), 0,
1474 XILINX_DMA_LOOP_COUNT);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301475
Kedareswara rao Appana9495f262016-02-26 19:33:54 +05301476 if (err) {
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301477 dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301478 dma_ctrl_read(chan, XILINX_DMA_REG_DMACR),
1479 dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301480 return -ETIMEDOUT;
1481 }
1482
1483 chan->err = false;
Kedareswara rao Appana21e02a32017-12-07 10:51:02 +05301484 chan->idle = true;
Kedareswara rao Appanafe0503e2017-12-07 10:51:03 +05301485 chan->desc_submitcount = 0;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301486
Kedareswara rao Appana9495f262016-02-26 19:33:54 +05301487 return err;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301488}
1489
1490/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301491 * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts
1492 * @chan: Driver specific DMA channel
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301493 *
1494 * Return: '0' on success and failure value on error
1495 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301496static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301497{
1498 int err;
1499
 1500	/* Reset the DMA engine */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301501 err = xilinx_dma_reset(chan);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301502 if (err)
1503 return err;
1504
1505 /* Enable interrupts */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301506 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1507 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301508
1509 return 0;
1510}
1511
1512/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301513 * xilinx_dma_irq_handler - DMA Interrupt handler
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301514 * @irq: IRQ number
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301515 * @data: Pointer to the Xilinx DMA channel structure
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301516 *
1517 * Return: IRQ_HANDLED/IRQ_NONE
1518 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301519static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301520{
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301521 struct xilinx_dma_chan *chan = data;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301522 u32 status;
1523
1524 /* Read the status and ack the interrupts. */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301525 status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR);
1526 if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK))
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301527 return IRQ_NONE;
1528
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301529 dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
1530 status & XILINX_DMA_DMAXR_ALL_IRQ_MASK);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301531
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301532 if (status & XILINX_DMA_DMASR_ERR_IRQ) {
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301533 /*
1534 * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
1535 * error is recoverable, ignore it. Otherwise flag the error.
1536 *
1537 * Only recoverable errors can be cleared in the DMASR register,
 1538		 * so make sure not to write 1 to the other error bits.
1539 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301540 u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK;
Kedareswara rao Appana48a59ed2016-04-06 10:44:55 +05301541
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301542 dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
1543 errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301544
1545 if (!chan->flush_on_fsync ||
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301546 (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) {
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301547 dev_err(chan->dev,
1548 "Channel %p has errors %x, cdr %x tdr %x\n",
1549 chan, errors,
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301550 dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC),
1551 dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC));
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301552 chan->err = true;
1553 }
1554 }
1555
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301556 if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301557 /*
 1558		 * The device is taking too long to complete the transfer when
 1559		 * the user requires responsiveness.
1560 */
1561 dev_dbg(chan->dev, "Inter-packet latency too long\n");
1562 }
1563
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301564 if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
Kedareswara rao Appana26c5e362016-02-26 19:33:52 +05301565 spin_lock(&chan->lock);
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301566 xilinx_dma_complete_descriptor(chan);
Kedareswara rao Appana21e02a32017-12-07 10:51:02 +05301567 chan->idle = true;
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301568 chan->start_transfer(chan);
Kedareswara rao Appana26c5e362016-02-26 19:33:52 +05301569 spin_unlock(&chan->lock);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301570 }
1571
1572 tasklet_schedule(&chan->tasklet);
1573 return IRQ_HANDLED;
1574}
1575
1576/**
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05301577 * append_desc_queue - Append a descriptor to the pending queue
1578 * @chan: Driver specific dma channel
1579 * @desc: dma transaction descriptor
1580 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301581static void append_desc_queue(struct xilinx_dma_chan *chan,
1582 struct xilinx_dma_tx_descriptor *desc)
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05301583{
1584 struct xilinx_vdma_tx_segment *tail_segment;
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301585 struct xilinx_dma_tx_descriptor *tail_desc;
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301586 struct xilinx_axidma_tx_segment *axidma_tail_segment;
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05301587 struct xilinx_cdma_tx_segment *cdma_tail_segment;
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05301588
1589 if (list_empty(&chan->pending_list))
1590 goto append;
1591
1592 /*
1593 * Add the hardware descriptor to the chain of hardware descriptors
1594 * that already exists in memory.
1595 */
1596 tail_desc = list_last_entry(&chan->pending_list,
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301597 struct xilinx_dma_tx_descriptor, node);
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05301598 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301599 tail_segment = list_last_entry(&tail_desc->segments,
1600 struct xilinx_vdma_tx_segment,
1601 node);
1602 tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05301603 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05301604 cdma_tail_segment = list_last_entry(&tail_desc->segments,
1605 struct xilinx_cdma_tx_segment,
1606 node);
1607 cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301608 } else {
1609 axidma_tail_segment = list_last_entry(&tail_desc->segments,
1610 struct xilinx_axidma_tx_segment,
1611 node);
1612 axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1613 }
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05301614
1615 /*
1616 * Add the software descriptor and all children to the list
1617 * of pending transactions
1618 */
1619append:
1620 list_add_tail(&desc->node, &chan->pending_list);
1621 chan->desc_pendingcount++;
1622
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05301623 if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA)
1624 && unlikely(chan->desc_pendingcount > chan->num_frms)) {
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05301625 dev_dbg(chan->dev, "desc pendingcount is too high\n");
1626 chan->desc_pendingcount = chan->num_frms;
1627 }
1628}
1629
1630/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301631 * xilinx_dma_tx_submit - Submit DMA transaction
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301632 * @tx: Async transaction descriptor
1633 *
1634 * Return: cookie value on success and failure value on error
1635 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301636static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301637{
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301638 struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
1639 struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301640 dma_cookie_t cookie;
1641 unsigned long flags;
1642 int err;
1643
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05301644 if (chan->cyclic) {
1645 xilinx_dma_free_tx_descriptor(chan, desc);
1646 return -EBUSY;
1647 }
1648
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301649 if (chan->err) {
1650 /*
 1651		 * If the reset fails, the system needs a hard reset;
 1652		 * the channel is no longer functional.
1653 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301654 err = xilinx_dma_chan_reset(chan);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301655 if (err < 0)
1656 return err;
1657 }
1658
1659 spin_lock_irqsave(&chan->lock, flags);
1660
1661 cookie = dma_cookie_assign(tx);
1662
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05301663 /* Put this transaction onto the tail of the pending queue */
1664 append_desc_queue(chan, desc);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301665
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05301666 if (desc->cyclic)
1667 chan->cyclic = true;
1668
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301669 spin_unlock_irqrestore(&chan->lock, flags);
1670
1671 return cookie;
1672}
1673
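/*
 * Illustrative sketch (not part of this driver): a client reaches this
 * ->tx_submit() hook through the generic dmaengine_submit() helper, and the
 * queued work only starts once dma_async_issue_pending() is called.  "chan"
 * and "txd" below are assumptions: a channel obtained via dma_request_chan()
 * and a descriptor returned by one of the prep functions in this file.
 *
 *	dma_cookie_t cookie;
 *
 *	cookie = dmaengine_submit(txd);
 *	if (dma_submit_error(cookie))
 *		return -EIO;
 *	dma_async_issue_pending(chan);
 */
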
1674/**
1675 * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a
1676 * DMA_SLAVE transaction
1677 * @dchan: DMA channel
1678 * @xt: Interleaved template pointer
1679 * @flags: transfer ack flags
1680 *
1681 * Return: Async transaction descriptor on success and NULL on failure
1682 */
1683static struct dma_async_tx_descriptor *
1684xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
1685 struct dma_interleaved_template *xt,
1686 unsigned long flags)
1687{
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301688 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1689 struct xilinx_dma_tx_descriptor *desc;
Kedareswara rao Appana4b597c62018-01-03 12:12:10 +05301690 struct xilinx_vdma_tx_segment *segment;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301691 struct xilinx_vdma_desc_hw *hw;
1692
1693 if (!is_slave_direction(xt->dir))
1694 return NULL;
1695
1696 if (!xt->numf || !xt->sgl[0].size)
1697 return NULL;
1698
Srikanth Thokalaa5e48e22014-11-05 20:37:01 +02001699 if (xt->frame_size != 1)
1700 return NULL;
1701
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301702 /* Allocate a transaction descriptor. */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301703 desc = xilinx_dma_alloc_tx_descriptor(chan);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301704 if (!desc)
1705 return NULL;
1706
1707 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301708 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301709 async_tx_ack(&desc->async_tx);
1710
1711 /* Allocate the link descriptor from DMA pool */
1712 segment = xilinx_vdma_alloc_tx_segment(chan);
1713 if (!segment)
1714 goto error;
1715
1716 /* Fill in the hardware descriptor */
1717 hw = &segment->hw;
1718 hw->vsize = xt->numf;
1719 hw->hsize = xt->sgl[0].size;
Srikanth Thokala6d80f452014-11-05 20:37:02 +02001720 hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301721 XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301722 hw->stride |= chan->config.frm_dly <<
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301723 XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301724
Kedareswara rao Appanab72db402016-04-06 10:38:08 +05301725 if (xt->dir != DMA_MEM_TO_DEV) {
1726 if (chan->ext_addr) {
1727 hw->buf_addr = lower_32_bits(xt->dst_start);
1728 hw->buf_addr_msb = upper_32_bits(xt->dst_start);
1729 } else {
1730 hw->buf_addr = xt->dst_start;
1731 }
1732 } else {
1733 if (chan->ext_addr) {
1734 hw->buf_addr = lower_32_bits(xt->src_start);
1735 hw->buf_addr_msb = upper_32_bits(xt->src_start);
1736 } else {
1737 hw->buf_addr = xt->src_start;
1738 }
1739 }
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301740
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301741 /* Insert the segment into the descriptor segments list. */
1742 list_add_tail(&segment->node, &desc->segments);
1743
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301744 /* Link the last hardware descriptor with the first. */
1745 segment = list_first_entry(&desc->segments,
1746 struct xilinx_vdma_tx_segment, node);
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05301747 desc->async_tx.phys = segment->phys;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301748
1749 return &desc->async_tx;
1750
1751error:
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301752 xilinx_dma_free_tx_descriptor(chan, desc);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301753 return NULL;
1754}
1755
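/*
 * Illustrative sketch (not part of this driver): a video client reaches the
 * VDMA prep hook above through dmaengine_prep_interleaved_dma() with one
 * data chunk per frame.  The wrapper struct mirrors the pattern used by the
 * Xilinx video drivers to provide storage behind the flexible sgl[] array;
 * "chan", "buf_phys", "height", "bytes_per_line" and "stride" are
 * placeholders assumed to belong to the client.
 *
 *	struct {
 *		struct dma_interleaved_template xt;
 *		struct data_chunk sgl[1];
 *	} dt = { };
 *	struct dma_async_tx_descriptor *txd;
 *
 *	dt.xt.dir = DMA_DEV_TO_MEM;
 *	dt.xt.dst_start = buf_phys;
 *	dt.xt.numf = height;			// lines per frame
 *	dt.xt.frame_size = 1;			// one chunk per line
 *	dt.xt.sgl[0].size = bytes_per_line;
 *	dt.xt.sgl[0].icg = stride - bytes_per_line;
 *	txd = dmaengine_prep_interleaved_dma(chan, &dt.xt, DMA_PREP_INTERRUPT);
 */
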
1756/**
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05301757 * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
1758 * @dchan: DMA channel
1759 * @dma_dst: destination address
1760 * @dma_src: source address
1761 * @len: transfer length
1762 * @flags: transfer ack flags
1763 *
1764 * Return: Async transaction descriptor on success and NULL on failure
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301765 */
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05301766static struct dma_async_tx_descriptor *
1767xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
1768 dma_addr_t dma_src, size_t len, unsigned long flags)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301769{
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05301770 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1771 struct xilinx_dma_tx_descriptor *desc;
Akinobu Mitadb6a3d02017-03-14 00:59:12 +09001772 struct xilinx_cdma_tx_segment *segment;
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05301773 struct xilinx_cdma_desc_hw *hw;
1774
Andrea Merello616f0f82018-11-20 16:31:45 +01001775 if (!len || len > chan->xdev->max_buffer_len)
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05301776 return NULL;
1777
1778 desc = xilinx_dma_alloc_tx_descriptor(chan);
1779 if (!desc)
1780 return NULL;
1781
1782 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1783 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1784
1785 /* Allocate the link descriptor from DMA pool */
1786 segment = xilinx_cdma_alloc_tx_segment(chan);
1787 if (!segment)
1788 goto error;
1789
1790 hw = &segment->hw;
1791 hw->control = len;
1792 hw->src_addr = dma_src;
1793 hw->dest_addr = dma_dst;
Kedareswara rao Appana9791e712016-06-07 19:21:16 +05301794 if (chan->ext_addr) {
1795 hw->src_addr_msb = upper_32_bits(dma_src);
1796 hw->dest_addr_msb = upper_32_bits(dma_dst);
1797 }
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05301798
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05301799 /* Insert the segment into the descriptor segments list. */
1800 list_add_tail(&segment->node, &desc->segments);
1801
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05301802 desc->async_tx.phys = segment->phys;
Akinobu Mitadb6a3d02017-03-14 00:59:12 +09001803 hw->next_desc = segment->phys;
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05301804
1805 return &desc->async_tx;
1806
1807error:
1808 xilinx_dma_free_tx_descriptor(chan, desc);
1809 return NULL;
1810}
1811
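/*
 * Illustrative sketch (not part of this driver): a CDMA client normally
 * reaches the prep hook above through dmaengine_prep_dma_memcpy().  "chan",
 * "dst", "src" and "len" are assumptions: a requested channel, two DMA
 * (bus) addresses and a length within the hardware limit.
 *
 *	struct dma_async_tx_descriptor *txd;
 *
 *	txd = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (!txd)
 *		return -ENOMEM;
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */
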
1812/**
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301813 * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
1814 * @dchan: DMA channel
1815 * @sgl: scatterlist to transfer to/from
1816 * @sg_len: number of entries in @scatterlist
1817 * @direction: DMA direction
1818 * @flags: transfer ack flags
1819 * @context: APP words of the descriptor
1820 *
1821 * Return: Async transaction descriptor on success and NULL on failure
1822 */
1823static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
1824 struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
1825 enum dma_transfer_direction direction, unsigned long flags,
1826 void *context)
1827{
1828 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1829 struct xilinx_dma_tx_descriptor *desc;
Kedareswara rao Appana23059402017-12-07 10:51:04 +05301830 struct xilinx_axidma_tx_segment *segment = NULL;
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301831 u32 *app_w = (u32 *)context;
1832 struct scatterlist *sg;
1833 size_t copy;
1834 size_t sg_used;
1835 unsigned int i;
1836
1837 if (!is_slave_direction(direction))
1838 return NULL;
1839
1840 /* Allocate a transaction descriptor. */
1841 desc = xilinx_dma_alloc_tx_descriptor(chan);
1842 if (!desc)
1843 return NULL;
1844
1845 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1846 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1847
1848 /* Build transactions using information in the scatter gather list */
1849 for_each_sg(sgl, sg, sg_len, i) {
1850 sg_used = 0;
1851
1852 /* Loop until the entire scatterlist entry is used */
1853 while (sg_used < sg_dma_len(sg)) {
1854 struct xilinx_axidma_desc_hw *hw;
1855
1856 /* Get a free segment */
1857 segment = xilinx_axidma_alloc_tx_segment(chan);
1858 if (!segment)
1859 goto error;
1860
1861 /*
1862 * Calculate the maximum number of bytes to transfer,
1863 * making sure it is less than the hw limit
1864 */
Andrea Merello616f0f82018-11-20 16:31:45 +01001865 copy = xilinx_dma_calc_copysize(chan, sg_dma_len(sg),
1866 sg_used);
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301867 hw = &segment->hw;
1868
1869 /* Fill in the descriptor */
Kedareswara rao Appanaf0cba682016-06-07 19:21:15 +05301870 xilinx_axidma_buf(chan, hw, sg_dma_address(sg),
1871 sg_used, 0);
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301872
1873 hw->control = copy;
1874
1875 if (chan->direction == DMA_MEM_TO_DEV) {
1876 if (app_w)
1877 memcpy(hw->app, app_w, sizeof(u32) *
1878 XILINX_DMA_NUM_APP_WORDS);
1879 }
1880
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301881 sg_used += copy;
1882
1883 /*
1884 * Insert the segment into the descriptor segments
1885 * list.
1886 */
1887 list_add_tail(&segment->node, &desc->segments);
1888 }
1889 }
1890
1891 segment = list_first_entry(&desc->segments,
1892 struct xilinx_axidma_tx_segment, node);
1893 desc->async_tx.phys = segment->phys;
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301894
1895 /* For the last DMA_MEM_TO_DEV transfer, set EOP */
1896 if (chan->direction == DMA_MEM_TO_DEV) {
1897 segment->hw.control |= XILINX_DMA_BD_SOP;
1898 segment = list_last_entry(&desc->segments,
1899 struct xilinx_axidma_tx_segment,
1900 node);
1901 segment->hw.control |= XILINX_DMA_BD_EOP;
1902 }
1903
1904 return &desc->async_tx;
1905
1906error:
1907 xilinx_dma_free_tx_descriptor(chan, desc);
1908 return NULL;
1909}
1910
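/*
 * Illustrative sketch (not part of this driver): an AXI DMA client
 * typically maps a scatterlist and reaches the prep hook above through
 * dmaengine_prep_slave_sg(), which passes a NULL @context (no APP words).
 * "dev", "chan", "sgl", "nents" and "my_done_callback" are placeholders
 * assumed to belong to the client.
 *
 *	struct dma_async_tx_descriptor *txd;
 *	int mapped;
 *
 *	mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	if (!mapped)
 *		return -EIO;
 *	txd = dmaengine_prep_slave_sg(chan, sgl, mapped, DMA_MEM_TO_DEV,
 *				      DMA_PREP_INTERRUPT);
 *	if (!txd)
 *		return -ENOMEM;
 *	txd->callback = my_done_callback;	// hypothetical completion callback
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */
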
1911/**
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05301912 * xilinx_dma_prep_dma_cyclic - prepare descriptors for a cyclic DMA transaction
Kedareswara rao Appanae50a0ad2017-12-07 10:51:05 +05301913 * @dchan: DMA channel
1914 * @buf_addr: Physical address of the buffer
1915 * @buf_len: Total length of the cyclic buffers
1916 * @period_len: length of individual cyclic buffer
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05301917 * @direction: DMA direction
1918 * @flags: transfer ack flags
Kedareswara rao Appanae50a0ad2017-12-07 10:51:05 +05301919 *
1920 * Return: Async transaction descriptor on success and NULL on failure
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05301921 */
1922static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
1923 struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
1924 size_t period_len, enum dma_transfer_direction direction,
1925 unsigned long flags)
1926{
1927 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1928 struct xilinx_dma_tx_descriptor *desc;
1929 struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
1930 size_t copy, sg_used;
1931 unsigned int num_periods;
1932 int i;
1933 u32 reg;
1934
Arnd Bergmannf67c3bd2016-06-13 17:07:33 +02001935 if (!period_len)
1936 return NULL;
1937
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05301938 num_periods = buf_len / period_len;
1939
Arnd Bergmannf67c3bd2016-06-13 17:07:33 +02001940 if (!num_periods)
1941 return NULL;
1942
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05301943 if (!is_slave_direction(direction))
1944 return NULL;
1945
1946 /* Allocate a transaction descriptor. */
1947 desc = xilinx_dma_alloc_tx_descriptor(chan);
1948 if (!desc)
1949 return NULL;
1950
1951 chan->direction = direction;
1952 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1953 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1954
1955 for (i = 0; i < num_periods; ++i) {
1956 sg_used = 0;
1957
1958 while (sg_used < period_len) {
1959 struct xilinx_axidma_desc_hw *hw;
1960
1961 /* Get a free segment */
1962 segment = xilinx_axidma_alloc_tx_segment(chan);
1963 if (!segment)
1964 goto error;
1965
1966 /*
1967 * Calculate the maximum number of bytes to transfer,
1968 * making sure it is less than the hw limit
1969 */
Andrea Merello616f0f82018-11-20 16:31:45 +01001970 copy = xilinx_dma_calc_copysize(chan, period_len,
1971 sg_used);
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05301972 hw = &segment->hw;
Kedareswara rao Appanaf0cba682016-06-07 19:21:15 +05301973 xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
1974 period_len * i);
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05301975 hw->control = copy;
1976
1977 if (prev)
1978 prev->hw.next_desc = segment->phys;
1979
1980 prev = segment;
1981 sg_used += copy;
1982
1983 /*
1984 * Insert the segment into the descriptor segments
1985 * list.
1986 */
1987 list_add_tail(&segment->node, &desc->segments);
1988 }
1989 }
1990
1991 head_segment = list_first_entry(&desc->segments,
1992 struct xilinx_axidma_tx_segment, node);
1993 desc->async_tx.phys = head_segment->phys;
1994
1995 desc->cyclic = true;
1996 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1997 reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
1998 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1999
Kedareswara rao Appanae598e6e2016-07-09 14:09:48 +05302000 segment = list_last_entry(&desc->segments,
2001 struct xilinx_axidma_tx_segment,
2002 node);
2003 segment->hw.next_desc = (u32) head_segment->phys;
2004
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05302005 /* For the last DMA_MEM_TO_DEV transfer, set EOP */
2006 if (direction == DMA_MEM_TO_DEV) {
Kedareswara rao Appanae167a0b2016-06-09 11:32:12 +05302007 head_segment->hw.control |= XILINX_DMA_BD_SOP;
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05302008 segment->hw.control |= XILINX_DMA_BD_EOP;
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05302009 }
2010
2011 return &desc->async_tx;
2012
2013error:
2014 xilinx_dma_free_tx_descriptor(chan, desc);
2015 return NULL;
2016}
2017
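/*
 * Illustrative sketch (not part of this driver): audio-style clients reach
 * the cyclic prep hook above through dmaengine_prep_dma_cyclic().  buf_len
 * is expected to be a whole number of periods (the code uses
 * buf_len / period_len); a completion callback usually fires once per
 * period.  "chan", "buf_phys", "buf_len", "period_len" and
 * "period_elapsed" are placeholders assumed to belong to the client.
 *
 *	struct dma_async_tx_descriptor *txd;
 *
 *	txd = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len, period_len,
 *					DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	if (!txd)
 *		return -ENOMEM;
 *	txd->callback = period_elapsed;		// hypothetical callback
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */
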
2018/**
Kedareswara rao Appana1a9e7a02016-06-24 10:51:23 +05302019 * xilinx_dma_prep_interleaved - prepare a descriptor for a
2020 * DMA_SLAVE transaction
2021 * @dchan: DMA channel
2022 * @xt: Interleaved template pointer
2023 * @flags: transfer ack flags
2024 *
2025 * Return: Async transaction descriptor on success and NULL on failure
2026 */
2027static struct dma_async_tx_descriptor *
2028xilinx_dma_prep_interleaved(struct dma_chan *dchan,
2029 struct dma_interleaved_template *xt,
2030 unsigned long flags)
2031{
2032 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2033 struct xilinx_dma_tx_descriptor *desc;
2034 struct xilinx_axidma_tx_segment *segment;
2035 struct xilinx_axidma_desc_hw *hw;
2036
2037 if (!is_slave_direction(xt->dir))
2038 return NULL;
2039
2040 if (!xt->numf || !xt->sgl[0].size)
2041 return NULL;
2042
2043 if (xt->frame_size != 1)
2044 return NULL;
2045
2046 /* Allocate a transaction descriptor. */
2047 desc = xilinx_dma_alloc_tx_descriptor(chan);
2048 if (!desc)
2049 return NULL;
2050
2051 chan->direction = xt->dir;
2052 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2053 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2054
2055 /* Get a free segment */
2056 segment = xilinx_axidma_alloc_tx_segment(chan);
2057 if (!segment)
2058 goto error;
2059
2060 hw = &segment->hw;
2061
2062 /* Fill in the descriptor */
2063 if (xt->dir != DMA_MEM_TO_DEV)
2064 hw->buf_addr = xt->dst_start;
2065 else
2066 hw->buf_addr = xt->src_start;
2067
2068 hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK;
2069 hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) &
2070 XILINX_DMA_BD_VSIZE_MASK;
2071 hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) &
2072 XILINX_DMA_BD_STRIDE_MASK;
2073 hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK;
2074
2075 /*
2076 * Insert the segment into the descriptor segments
2077 * list.
2078 */
2079 list_add_tail(&segment->node, &desc->segments);
2080
2081
2082 segment = list_first_entry(&desc->segments,
2083 struct xilinx_axidma_tx_segment, node);
2084 desc->async_tx.phys = segment->phys;
2085
2086 /* For the last DMA_MEM_TO_DEV transfer, set EOP */
2087 if (xt->dir == DMA_MEM_TO_DEV) {
2088 segment->hw.control |= XILINX_DMA_BD_SOP;
2089 segment = list_last_entry(&desc->segments,
2090 struct xilinx_axidma_tx_segment,
2091 node);
2092 segment->hw.control |= XILINX_DMA_BD_EOP;
2093 }
2094
2095 return &desc->async_tx;
2096
2097error:
2098 xilinx_dma_free_tx_descriptor(chan, desc);
2099 return NULL;
2100}
2101
2102/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302103 * xilinx_dma_terminate_all - Halt the channel and free descriptors
Kedareswara rao Appanae50a0ad2017-12-07 10:51:05 +05302104 * @dchan: Driver specific DMA Channel pointer
2105 *
2106 * Return: '0' always.
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302107 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302108static int xilinx_dma_terminate_all(struct dma_chan *dchan)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302109{
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302110 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05302111 u32 reg;
Akinobu Mita676f9c22017-03-14 00:59:11 +09002112 int err;
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05302113
2114 if (chan->cyclic)
2115 xilinx_dma_chan_reset(chan);
Maxime Ripardba714042014-11-17 14:42:38 +01002116
Akinobu Mita676f9c22017-03-14 00:59:11 +09002117 err = chan->stop_transfer(chan);
2118 if (err) {
2119 dev_err(chan->dev, "Cannot stop channel %p: %x\n",
2120 chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
2121 chan->err = true;
2122 }
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302123
2124 /* Remove and free all of the descriptors in the lists */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302125 xilinx_dma_free_descriptors(chan);
Kedareswara rao Appana21e02a32017-12-07 10:51:02 +05302126 chan->idle = true;
Maxime Ripardba714042014-11-17 14:42:38 +01002127
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05302128 if (chan->cyclic) {
2129 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2130 reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
2131 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
2132 chan->cyclic = false;
2133 }
2134
Kedareswara rao Appana48c62fb2018-01-03 12:12:09 +05302135 if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
2136 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
2137 XILINX_CDMA_CR_SGMODE);
2138
Maxime Ripardba714042014-11-17 14:42:38 +01002139 return 0;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302140}
2141
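/*
 * Illustrative sketch (not part of this driver): clients normally invoke
 * the terminate hook above through dmaengine_terminate_async(), or through
 * dmaengine_terminate_sync() when they must also wait for any in-flight
 * completion callbacks before reusing their buffers.  "chan" is assumed to
 * be the client's requested channel.
 *
 *	dmaengine_terminate_sync(chan);
 */
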
2142/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302143 * xilinx_vdma_channel_set_config - Configure VDMA channel
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302144 * Run-time configuration for AXI VDMA, supports:
2145 * . halt the channel
2146 * . configure interrupt coalescing and inter-packet delay threshold
2147 * . start/stop parking
2148 * . enable genlock
2149 *
2150 * @dchan: DMA channel
2151 * @cfg: VDMA device configuration pointer
2152 *
2153 * Return: '0' on success and failure value on error
2154 */
2155int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
2156 struct xilinx_vdma_config *cfg)
2157{
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302158 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302159 u32 dmacr;
2160
2161 if (cfg->reset)
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302162 return xilinx_dma_chan_reset(chan);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302163
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302164 dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302165
2166 chan->config.frm_dly = cfg->frm_dly;
2167 chan->config.park = cfg->park;
2168
2169 /* genlock settings */
2170 chan->config.gen_lock = cfg->gen_lock;
2171 chan->config.master = cfg->master;
2172
2173 if (cfg->gen_lock && chan->genlock) {
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302174 dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
2175 dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302176 }
2177
2178 chan->config.frm_cnt_en = cfg->frm_cnt_en;
Radhey Shyam Pandey0894aa22018-06-13 13:04:48 +05302179 chan->config.vflip_en = cfg->vflip_en;
2180
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302181 if (cfg->park)
2182 chan->config.park_frm = cfg->park_frm;
2183 else
2184 chan->config.park_frm = -1;
2185
2186 chan->config.coalesc = cfg->coalesc;
2187 chan->config.delay = cfg->delay;
2188
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302189 if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
2190 dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302191 chan->config.coalesc = cfg->coalesc;
2192 }
2193
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302194 if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
2195 dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302196 chan->config.delay = cfg->delay;
2197 }
2198
2199 /* FSync Source selection */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302200 dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
2201 dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302202
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302203 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302204
2205 return 0;
2206}
2207EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
2208
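/*
 * Illustrative sketch (not part of this driver): a video client that owns
 * the channel can tune frame-count interrupts and parking through the
 * exported helper above.  Zero-initialised fields keep their defaults;
 * "chan" is assumed to come from dma_request_chan().
 *
 *	struct xilinx_vdma_config cfg = {
 *		.frm_cnt_en = 1,	// interrupt every .coalesc frames
 *		.coalesc = 1,
 *		.park = 0,		// circular mode, do not park
 *	};
 *
 *	xilinx_vdma_channel_set_config(chan, &cfg);
 */
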
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302209/* -----------------------------------------------------------------------------
2210 * Probe and remove
2211 */
2212
2213/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302214 * xilinx_dma_chan_remove - Per Channel remove function
2215 * @chan: Driver specific DMA channel
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302216 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302217static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302218{
2219 /* Disable all interrupts */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302220 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
2221 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302222
2223 if (chan->irq > 0)
2224 free_irq(chan->irq, chan);
2225
2226 tasklet_kill(&chan->tasklet);
2227
2228 list_del(&chan->common.device_node);
2229}
2230
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302231static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2232 struct clk **tx_clk, struct clk **rx_clk,
2233 struct clk **sg_clk, struct clk **tmp_clk)
2234{
2235 int err;
2236
2237 *tmp_clk = NULL;
2238
2239 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2240 if (IS_ERR(*axi_clk)) {
2241 err = PTR_ERR(*axi_clk);
Radhey Shyam Pandey944879b2019-09-26 16:21:00 +05302242 if (err != -EPROBE_DEFER)
2243 dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n",
2244 err);
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302245 return err;
2246 }
2247
2248 *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
2249 if (IS_ERR(*tx_clk))
2250 *tx_clk = NULL;
2251
2252 *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
2253 if (IS_ERR(*rx_clk))
2254 *rx_clk = NULL;
2255
2256 *sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
2257 if (IS_ERR(*sg_clk))
2258 *sg_clk = NULL;
2259
2260 err = clk_prepare_enable(*axi_clk);
2261 if (err) {
Lars-Peter Clausen574897d2017-08-31 13:35:10 +02002262 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302263 return err;
2264 }
2265
2266 err = clk_prepare_enable(*tx_clk);
2267 if (err) {
Lars-Peter Clausen574897d2017-08-31 13:35:10 +02002268 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302269 goto err_disable_axiclk;
2270 }
2271
2272 err = clk_prepare_enable(*rx_clk);
2273 if (err) {
Lars-Peter Clausen574897d2017-08-31 13:35:10 +02002274 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302275 goto err_disable_txclk;
2276 }
2277
2278 err = clk_prepare_enable(*sg_clk);
2279 if (err) {
Lars-Peter Clausen574897d2017-08-31 13:35:10 +02002280 dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err);
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302281 goto err_disable_rxclk;
2282 }
2283
2284 return 0;
2285
2286err_disable_rxclk:
2287 clk_disable_unprepare(*rx_clk);
2288err_disable_txclk:
2289 clk_disable_unprepare(*tx_clk);
2290err_disable_axiclk:
2291 clk_disable_unprepare(*axi_clk);
2292
2293 return err;
2294}
2295
2296static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2297 struct clk **dev_clk, struct clk **tmp_clk,
2298 struct clk **tmp1_clk, struct clk **tmp2_clk)
2299{
2300 int err;
2301
2302 *tmp_clk = NULL;
2303 *tmp1_clk = NULL;
2304 *tmp2_clk = NULL;
2305
2306 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2307 if (IS_ERR(*axi_clk)) {
2308 err = PTR_ERR(*axi_clk);
Radhey Shyam Pandey944879b2019-09-26 16:21:00 +05302309 if (err != -EPROBE_DEFER)
2310 dev_err(&pdev->dev, "failed to get axi_clk (%d)\n",
2311 err);
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302312 return err;
2313 }
2314
2315 *dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
2316 if (IS_ERR(*dev_clk)) {
2317 err = PTR_ERR(*dev_clk);
Radhey Shyam Pandey944879b2019-09-26 16:21:00 +05302318 if (err != -EPROBE_DEFER)
2319 dev_err(&pdev->dev, "failed to get dev_clk (%d)\n",
2320 err);
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302321 return err;
2322 }
2323
2324 err = clk_prepare_enable(*axi_clk);
2325 if (err) {
Lars-Peter Clausen574897d2017-08-31 13:35:10 +02002326 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302327 return err;
2328 }
2329
2330 err = clk_prepare_enable(*dev_clk);
2331 if (err) {
Lars-Peter Clausen574897d2017-08-31 13:35:10 +02002332 dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err);
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302333 goto err_disable_axiclk;
2334 }
2335
2336 return 0;
2337
2338err_disable_axiclk:
2339 clk_disable_unprepare(*axi_clk);
2340
2341 return err;
2342}
2343
2344static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2345 struct clk **tx_clk, struct clk **txs_clk,
2346 struct clk **rx_clk, struct clk **rxs_clk)
2347{
2348 int err;
2349
2350 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2351 if (IS_ERR(*axi_clk)) {
2352 err = PTR_ERR(*axi_clk);
Radhey Shyam Pandey944879b2019-09-26 16:21:00 +05302353 if (err != -EPROBE_DEFER)
2354 dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n",
2355 err);
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302356 return err;
2357 }
2358
2359 *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
2360 if (IS_ERR(*tx_clk))
2361 *tx_clk = NULL;
2362
2363 *txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
2364 if (IS_ERR(*txs_clk))
2365 *txs_clk = NULL;
2366
2367 *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
2368 if (IS_ERR(*rx_clk))
2369 *rx_clk = NULL;
2370
2371 *rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
2372 if (IS_ERR(*rxs_clk))
2373 *rxs_clk = NULL;
2374
2375 err = clk_prepare_enable(*axi_clk);
2376 if (err) {
Radhey Shyam Pandey944879b2019-09-26 16:21:00 +05302377 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n",
2378 err);
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302379 return err;
2380 }
2381
2382 err = clk_prepare_enable(*tx_clk);
2383 if (err) {
Lars-Peter Clausen574897d2017-08-31 13:35:10 +02002384 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302385 goto err_disable_axiclk;
2386 }
2387
2388 err = clk_prepare_enable(*txs_clk);
2389 if (err) {
Lars-Peter Clausen574897d2017-08-31 13:35:10 +02002390 dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err);
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302391 goto err_disable_txclk;
2392 }
2393
2394 err = clk_prepare_enable(*rx_clk);
2395 if (err) {
Lars-Peter Clausen574897d2017-08-31 13:35:10 +02002396 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302397 goto err_disable_txsclk;
2398 }
2399
2400 err = clk_prepare_enable(*rxs_clk);
2401 if (err) {
Lars-Peter Clausen574897d2017-08-31 13:35:10 +02002402 dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err);
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302403 goto err_disable_rxclk;
2404 }
2405
2406 return 0;
2407
2408err_disable_rxclk:
2409 clk_disable_unprepare(*rx_clk);
2410err_disable_txsclk:
2411 clk_disable_unprepare(*txs_clk);
2412err_disable_txclk:
2413 clk_disable_unprepare(*tx_clk);
2414err_disable_axiclk:
2415 clk_disable_unprepare(*axi_clk);
2416
2417 return err;
2418}
2419
2420static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
2421{
2422 clk_disable_unprepare(xdev->rxs_clk);
2423 clk_disable_unprepare(xdev->rx_clk);
2424 clk_disable_unprepare(xdev->txs_clk);
2425 clk_disable_unprepare(xdev->tx_clk);
2426 clk_disable_unprepare(xdev->axi_clk);
2427}
2428
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302429/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302430 * xilinx_dma_chan_probe - Per Channel Probing
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302431 * It gets channel features from the device tree entry and
 2432 * initializes special channel handling routines.
2433 *
2434 * @xdev: Driver specific device structure
2435 * @node: Device node
Kedareswara rao Appanae50a0ad2017-12-07 10:51:05 +05302436 * @chan_id: DMA Channel id
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302437 *
2438 * Return: '0' on success and failure value on error
2439 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302440static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
Kedareswara rao Appana1a9e7a02016-06-24 10:51:23 +05302441 struct device_node *node, int chan_id)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302442{
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302443 struct xilinx_dma_chan *chan;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302444 bool has_dre = false;
2445 u32 value, width;
2446 int err;
2447
2448 /* Allocate and initialize the channel structure */
2449 chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
2450 if (!chan)
2451 return -ENOMEM;
2452
2453 chan->dev = xdev->dev;
2454 chan->xdev = xdev;
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05302455 chan->desc_pendingcount = 0x0;
Kedareswara rao Appanab72db402016-04-06 10:38:08 +05302456 chan->ext_addr = xdev->ext_addr;
Vinod Koul30931862017-12-18 10:48:05 +05302457	/* This variable ensures that descriptors are not
 2458	 * submitted while the DMA engine is busy.  It is
 2459	 * added to avoid polling a bit in the status register to
Kedareswara rao Appana21e02a32017-12-07 10:51:02 +05302460	 * know the DMA state in the driver hot path.
2461 */
2462 chan->idle = true;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302463
2464 spin_lock_init(&chan->lock);
2465 INIT_LIST_HEAD(&chan->pending_list);
2466 INIT_LIST_HEAD(&chan->done_list);
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05302467 INIT_LIST_HEAD(&chan->active_list);
Kedareswara rao Appana23059402017-12-07 10:51:04 +05302468 INIT_LIST_HEAD(&chan->free_seg_list);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302469
2470 /* Retrieve the channel properties from the device tree */
2471 has_dre = of_property_read_bool(node, "xlnx,include-dre");
2472
2473 chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");
2474
2475 err = of_property_read_u32(node, "xlnx,datawidth", &value);
2476 if (err) {
2477 dev_err(xdev->dev, "missing xlnx,datawidth property\n");
2478 return err;
2479 }
2480 width = value >> 3; /* Convert bits to bytes */
2481
2482 /* If data width is greater than 8 bytes, DRE is not in hw */
2483 if (width > 8)
2484 has_dre = false;
2485
2486 if (!has_dre)
2487 xdev->common.copy_align = fls(width - 1);
2488
Kedareswara rao Appanae131f1b2016-06-24 10:51:26 +05302489 if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
2490 of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
2491 of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302492 chan->direction = DMA_MEM_TO_DEV;
Kedareswara rao Appana1a9e7a02016-06-24 10:51:23 +05302493 chan->id = chan_id;
2494 chan->tdest = chan_id;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302495
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302496 chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302497 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302498 chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
Kedareswara rao Appanafe0503e2017-12-07 10:51:03 +05302499 chan->config.park = 1;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302500
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302501 if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
2502 xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
2503 chan->flush_on_fsync = true;
2504 }
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302505 } else if (of_device_is_compatible(node,
Kedareswara rao Appanae131f1b2016-06-24 10:51:26 +05302506 "xlnx,axi-vdma-s2mm-channel") ||
2507 of_device_is_compatible(node,
2508 "xlnx,axi-dma-s2mm-channel")) {
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302509 chan->direction = DMA_DEV_TO_MEM;
Kedareswara rao Appana1a9e7a02016-06-24 10:51:23 +05302510 chan->id = chan_id;
2511 chan->tdest = chan_id - xdev->nr_channels;
Radhey Shyam Pandey0894aa22018-06-13 13:04:48 +05302512 chan->has_vflip = of_property_read_bool(node,
2513 "xlnx,enable-vert-flip");
2514 if (chan->has_vflip) {
2515 chan->config.vflip_en = dma_read(chan,
2516 XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP) &
2517 XILINX_VDMA_ENABLE_VERTICAL_FLIP;
2518 }
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302519
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302520 chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302521 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302522 chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
Kedareswara rao Appanafe0503e2017-12-07 10:51:03 +05302523 chan->config.park = 1;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302524
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302525 if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
2526 xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
2527 chan->flush_on_fsync = true;
2528 }
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302529 } else {
2530 dev_err(xdev->dev, "Invalid channel compatible node\n");
2531 return -EINVAL;
2532 }
2533
2534 /* Request the interrupt */
2535 chan->irq = irq_of_parse_and_map(node, 0);
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302536 err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED,
2537 "xilinx-dma-controller", chan);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302538 if (err) {
2539 dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
2540 return err;
2541 }
2542
Akinobu Mita676f9c22017-03-14 00:59:11 +09002543 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302544 chan->start_transfer = xilinx_dma_start_transfer;
Akinobu Mita676f9c22017-03-14 00:59:11 +09002545 chan->stop_transfer = xilinx_dma_stop_transfer;
2546 } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05302547 chan->start_transfer = xilinx_cdma_start_transfer;
Akinobu Mita676f9c22017-03-14 00:59:11 +09002548 chan->stop_transfer = xilinx_cdma_stop_transfer;
2549 } else {
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302550 chan->start_transfer = xilinx_vdma_start_transfer;
Akinobu Mita676f9c22017-03-14 00:59:11 +09002551 chan->stop_transfer = xilinx_dma_stop_transfer;
2552 }
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302553
Andrea Merello05f7ea72018-11-20 16:31:49 +01002554 /* check if SG is enabled (only for AXIDMA and CDMA) */
2555 if (xdev->dma_config->dmatype != XDMA_TYPE_VDMA) {
2556 if (dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
2557 XILINX_DMA_DMASR_SG_MASK)
2558 chan->has_sg = true;
2559 dev_dbg(chan->dev, "ch %d: SG %s\n", chan->id,
2560 chan->has_sg ? "enabled" : "disabled");
2561 }
2562
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302563 /* Initialize the tasklet */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302564 tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302565 (unsigned long)chan);
2566
2567 /*
2568 * Initialize the DMA channel and add it to the DMA engine channels
2569 * list.
2570 */
2571 chan->common.device = &xdev->common;
2572
2573 list_add_tail(&chan->common.device_node, &xdev->common.channels);
2574 xdev->chan[chan->id] = chan;
2575
2576 /* Reset the channel */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302577 err = xilinx_dma_chan_reset(chan);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302578 if (err < 0) {
2579 dev_err(xdev->dev, "Reset channel failed\n");
2580 return err;
2581 }
2582
2583 return 0;
2584}
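/*
 * For reference, a hypothetical child node that this per-channel probe would
 * parse might look like the sketch below; the node name, interrupt cells and
 * property values are purely illustrative assumptions, not taken from a real
 * board:
 *
 *	dma-channel@40400000 {
 *		compatible = "xlnx,axi-dma-mm2s-channel";
 *		interrupts = <0 59 4>;
 *		xlnx,datawidth = <0x40>;
 *		xlnx,include-dre;
 *	};
 *
 * xlnx,datawidth is specified in bits and converted to bytes above; when
 * xlnx,include-dre is present and the width is 8 bytes or less, copy_align
 * is left untouched.
 */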
2585
2586/**
Kedareswara rao Appana1a9e7a02016-06-24 10:51:23 +05302587 * xilinx_dma_child_probe - Per child node probe
2588 * It gets the number of dma-channels per child node from
2589 * the device tree and initializes all the channels.
2590 *
2591 * @xdev: Driver specific device structure
2592 * @node: Device node
2593 *
2594 * Return: 0 always.
2595 */
2596static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
Kedareswara rao Appana22653af2017-12-07 10:51:06 +05302597 struct device_node *node)
2598{
Kedareswara rao Appana1a9e7a02016-06-24 10:51:23 +05302599 int ret, i, nr_channels = 1;
2600
2601 ret = of_property_read_u32(node, "dma-channels", &nr_channels);
2602 if ((ret < 0) && xdev->mcdma)
2603 dev_warn(xdev->dev, "missing dma-channels property\n");
2604
2605 for (i = 0; i < nr_channels; i++)
2606 xilinx_dma_chan_probe(xdev, node, xdev->chan_id++);
2607
2608 xdev->nr_channels += nr_channels;
2609
2610 return 0;
2611}
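/*
 * In the multi-channel (mcdma) configuration each child node is expected to
 * carry a dma-channels count, for example (value purely illustrative):
 *
 *	dma-channels = <2>;
 *
 * When the property is absent, the count defaults to a single channel per
 * child node, as handled above.
 */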
2612
2613/**
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302614 * of_dma_xilinx_xlate - Translation function
2615 * @dma_spec: Pointer to DMA specifier as found in the device tree
2616 * @ofdma: Pointer to DMA controller data
2617 *
2618 * Return: DMA channel pointer on success and NULL on error
2619 */
2620static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
2621 struct of_dma *ofdma)
2622{
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302623 struct xilinx_dma_device *xdev = ofdma->of_dma_data;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302624 int chan_id = dma_spec->args[0];
2625
Kedareswara rao Appana1a9e7a02016-06-24 10:51:23 +05302626 if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id])
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302627 return NULL;
2628
2629 return dma_get_slave_channel(&xdev->chan[chan_id]->common);
2630}
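/*
 * Consumer nodes reference a channel with a single-cell specifier whose value
 * is the channel id validated above. A hedged, purely illustrative example
 * (labels and names are assumptions):
 *
 *	client_device {
 *		dmas = <&axi_dma_0 0>, <&axi_dma_0 1>;
 *		dma-names = "tx", "rx";
 *	};
 */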
2631
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302632static const struct xilinx_dma_config axidma_config = {
2633 .dmatype = XDMA_TYPE_AXIDMA,
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302634 .clk_init = axidma_clk_init,
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302635};
2636
2637static const struct xilinx_dma_config axicdma_config = {
2638 .dmatype = XDMA_TYPE_CDMA,
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302639 .clk_init = axicdma_clk_init,
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302640};
2641
2642static const struct xilinx_dma_config axivdma_config = {
2643 .dmatype = XDMA_TYPE_VDMA,
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302644 .clk_init = axivdma_clk_init,
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302645};
2646
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302647static const struct of_device_id xilinx_dma_of_ids[] = {
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302648 { .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
2649 { .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
2650 { .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302651 {}
2652};
2653MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
2654
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302655/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302656 * xilinx_dma_probe - Driver probe function
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302657 * @pdev: Pointer to the platform_device structure
2658 *
2659 * Return: '0' on success and failure value on error
2660 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302661static int xilinx_dma_probe(struct platform_device *pdev)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302662{
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302663 int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
2664 struct clk **, struct clk **, struct clk **)
2665 = axivdma_clk_init;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302666 struct device_node *node = pdev->dev.of_node;
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302667 struct xilinx_dma_device *xdev;
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302668 struct device_node *child, *np = pdev->dev.of_node;
Radhey Shyam Pandeyae809692018-11-20 16:31:48 +01002669 u32 num_frames, addr_width, len_width;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302670 int i, err;
2671
2672 /* Allocate and initialize the DMA engine structure */
2673 xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
2674 if (!xdev)
2675 return -ENOMEM;
2676
2677 xdev->dev = &pdev->dev;
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302678 if (np) {
2679 const struct of_device_id *match;
2680
2681 match = of_match_node(xilinx_dma_of_ids, np);
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302682 if (match && match->data) {
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302683 xdev->dma_config = match->data;
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302684 clk_init = xdev->dma_config->clk_init;
2685 }
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302686 }
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302687
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302688 err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
2689 &xdev->rx_clk, &xdev->rxs_clk);
2690 if (err)
2691 return err;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302692
2693 /* Request and map I/O memory */
Radhey Shyam Pandeya8bd4752019-09-26 16:20:59 +05302694 xdev->regs = devm_platform_ioremap_resource(pdev, 0);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302695 if (IS_ERR(xdev->regs))
2696 return PTR_ERR(xdev->regs);
2697
2698 /* Retrieve the DMA engine properties from the device tree */
Radhey Shyam Pandeyae809692018-11-20 16:31:48 +01002699 xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
Andrea Merello616f0f82018-11-20 16:31:45 +01002700
Radhey Shyam Pandeyae809692018-11-20 16:31:48 +01002701 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
Kedareswara rao Appana1a9e7a02016-06-24 10:51:23 +05302702 xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");
Radhey Shyam Pandeyae809692018-11-20 16:31:48 +01002703 if (!of_property_read_u32(node, "xlnx,sg-length-width",
2704 &len_width)) {
2705 if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
2706 len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX) {
2707 dev_warn(xdev->dev,
2708 "invalid xlnx,sg-length-width property value. Using default width\n");
2709 } else {
2710 if (len_width > XILINX_DMA_MAX_TRANS_LEN_MAX)
2711 dev_warn(xdev->dev, "Please ensure that IP supports buffer length > 23 bits\n");
2712 xdev->max_buffer_len =
2713 GENMASK(len_width - 1, 0);
2714 }
2715 }
2716 }
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302717
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302718 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302719 err = of_property_read_u32(node, "xlnx,num-fstores",
2720 &num_frames);
2721 if (err < 0) {
2722 dev_err(xdev->dev,
2723 "missing xlnx,num-fstores property\n");
2724 return err;
2725 }
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302726
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302727 err = of_property_read_u32(node, "xlnx,flush-fsync",
2728 &xdev->flush_on_fsync);
2729 if (err < 0)
2730 dev_warn(xdev->dev,
2731 "missing xlnx,flush-fsync property\n");
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302732 }
2733
Kedareswara rao Appanab72db402016-04-06 10:38:08 +05302734 err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302735 if (err < 0)
Kedareswara rao Appanab72db402016-04-06 10:38:08 +05302736 dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");
2737
2738 if (addr_width > 32)
2739 xdev->ext_addr = true;
2740 else
2741 xdev->ext_addr = false;
2742
2743 /* Set the dma mask bits */
2744 dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width));
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302745
2746 /* Initialize the DMA engine */
2747 xdev->common.dev = &pdev->dev;
2748
2749 INIT_LIST_HEAD(&xdev->common.channels);
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302750 if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) {
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05302751 dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
2752 dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
2753 }
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302754
2755 xdev->common.device_alloc_chan_resources =
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302756 xilinx_dma_alloc_chan_resources;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302757 xdev->common.device_free_chan_resources =
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302758 xilinx_dma_free_chan_resources;
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302759 xdev->common.device_terminate_all = xilinx_dma_terminate_all;
2760 xdev->common.device_tx_status = xilinx_dma_tx_status;
2761 xdev->common.device_issue_pending = xilinx_dma_issue_pending;
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302762 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05302763 dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302764 xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05302765 xdev->common.device_prep_dma_cyclic =
2766 xilinx_dma_prep_dma_cyclic;
Kedareswara rao Appana1a9e7a02016-06-24 10:51:23 +05302767 xdev->common.device_prep_interleaved_dma =
2768 xilinx_dma_prep_interleaved;
Nicholas Graumanna575d0b2019-10-15 20:18:21 +05302769		/* Residue calculation is supported only by AXI DMA and CDMA */
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302770 xdev->common.residue_granularity =
2771 DMA_RESIDUE_GRANULARITY_SEGMENT;
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302772 } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05302773 dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
2774 xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
Nicholas Graumanna575d0b2019-10-15 20:18:21 +05302775		/* Residue calculation is supported only by AXI DMA and CDMA */
2776 xdev->common.residue_granularity =
2777 DMA_RESIDUE_GRANULARITY_SEGMENT;
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302778 } else {
2779 xdev->common.device_prep_interleaved_dma =
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302780 xilinx_vdma_dma_prep_interleaved;
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302781 }
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302782
2783 platform_set_drvdata(pdev, xdev);
2784
2785 /* Initialize the channels */
2786 for_each_child_of_node(node, child) {
Kedareswara rao Appana1a9e7a02016-06-24 10:51:23 +05302787 err = xilinx_dma_child_probe(xdev, child);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302788 if (err < 0)
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302789 goto disable_clks;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302790 }
2791
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302792 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
Kedareswara rao Appana1a9e7a02016-06-24 10:51:23 +05302793 for (i = 0; i < xdev->nr_channels; i++)
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302794 if (xdev->chan[i])
2795 xdev->chan[i]->num_frms = num_frames;
2796 }
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302797
2798 /* Register the DMA engine with the core */
2799 dma_async_device_register(&xdev->common);
2800
2801 err = of_dma_controller_register(node, of_dma_xilinx_xlate,
2802 xdev);
2803 if (err < 0) {
2804 dev_err(&pdev->dev, "Unable to register DMA to DT\n");
2805 dma_async_device_unregister(&xdev->common);
2806 goto error;
2807 }
2808
Kedareswara rao Appanac7a03592017-12-07 10:51:07 +05302809 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
2810 dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n");
2811 else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
2812 dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n");
2813 else
2814 dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302815
2816 return 0;
2817
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302818disable_clks:
2819 xdma_disable_allclks(xdev);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302820error:
Kedareswara rao Appana1a9e7a02016-06-24 10:51:23 +05302821 for (i = 0; i < xdev->nr_channels; i++)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302822 if (xdev->chan[i])
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302823 xilinx_dma_chan_remove(xdev->chan[i]);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302824
2825 return err;
2826}
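/*
 * Putting the properties parsed above together, a hypothetical AXI DMA
 * controller node could look like the sketch below; addresses, interrupt
 * numbers and property values are illustrative assumptions only, and clock
 * properties are omitted:
 *
 *	axi_dma_0: dma@40400000 {
 *		compatible = "xlnx,axi-dma-1.00.a";
 *		reg = <0x40400000 0x10000>;
 *		#dma-cells = <1>;
 *		xlnx,addrwidth = <32>;
 *		xlnx,sg-length-width = <23>;
 *
 *		dma-channel@40400000 {
 *			compatible = "xlnx,axi-dma-mm2s-channel";
 *			interrupts = <0 59 4>;
 *			xlnx,datawidth = <0x40>;
 *		};
 *	};
 */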
2827
2828/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302829 * xilinx_dma_remove - Driver remove function
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302830 * @pdev: Pointer to the platform_device structure
2831 *
2832 * Return: Always '0'
2833 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302834static int xilinx_dma_remove(struct platform_device *pdev)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302835{
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302836 struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302837 int i;
2838
2839 of_dma_controller_free(pdev->dev.of_node);
2840
2841 dma_async_device_unregister(&xdev->common);
2842
Kedareswara rao Appana1a9e7a02016-06-24 10:51:23 +05302843 for (i = 0; i < xdev->nr_channels; i++)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302844 if (xdev->chan[i])
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302845 xilinx_dma_chan_remove(xdev->chan[i]);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302846
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302847 xdma_disable_allclks(xdev);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302848
2849 return 0;
2850}
2851
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302852static struct platform_driver xilinx_vdma_driver = {
2853 .driver = {
2854 .name = "xilinx-vdma",
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302855 .of_match_table = xilinx_dma_of_ids,
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302856 },
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302857 .probe = xilinx_dma_probe,
2858 .remove = xilinx_dma_remove,
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302859};
2860
2861module_platform_driver(xilinx_vdma_driver);
2862
2863MODULE_AUTHOR("Xilinx, Inc.");
2864MODULE_DESCRIPTION("Xilinx VDMA driver");
2865MODULE_LICENSE("GPL v2");
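/*
 * Client drivers do not call into this file directly; they go through the
 * generic dmaengine API. A minimal, hedged sketch of a slave transfer is
 * shown below (the device handle, the mapped buffer and the completion
 * callback are illustrative assumptions):
 *
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *desc;
 *	dma_cookie_t cookie;
 *
 *	chan = dma_request_chan(dev, "tx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *
 *	desc = dmaengine_prep_slave_single(chan, dma_addr, len,
 *					   DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	if (!desc) {
 *		dma_release_channel(chan);
 *		return -ENOMEM;
 *	}
 *
 *	desc->callback = my_done_callback;
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */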