// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMA driver for Xilinx Video DMA Engine
 *
 * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
 *
 * Based on the Freescale DMA driver.
 *
 * Description:
 * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
 * core that provides high-bandwidth direct memory access between memory
 * and AXI4-Stream type video target peripherals. The core provides efficient
 * two-dimensional DMA operations with independent asynchronous read (S2MM)
 * and write (MM2S) channel operation. It can be configured to have either
 * one channel or two channels. If configured as two channels, one is to
 * transmit to the video device (MM2S) and another is to receive from the
 * video device (S2MM). Initialization, status, interrupt and management
 * registers are accessed through an AXI4-Lite slave interface.
 *
 * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that
 * provides high-bandwidth one-dimensional direct memory access between memory
 * and AXI4-Stream target peripherals. It supports one receive and one
 * transmit channel, both of them optional at synthesis time.
 *
 * The AXI CDMA is a soft IP that provides high-bandwidth Direct Memory
 * Access (DMA) between a memory-mapped source address and a memory-mapped
 * destination address.
 *
 * The AXI Multichannel Direct Memory Access (AXI MCDMA) core is a soft
 * Xilinx IP that provides high-bandwidth direct memory access between
 * memory and AXI4-Stream target peripherals. It provides a scatter-gather
 * (SG) interface with independent configuration support for multiple
 * channels.
 */
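
/*
 * Illustrative client usage (not part of this driver): peripheral drivers
 * consume these engines through the generic dmaengine API. The sketch below
 * assumes a device-tree "dma-names" entry called "axidma0" and an already
 * DMA-mapped buffer; names, flags and error handling are simplified and
 * hypothetical.
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "axidma0");
 *	struct dma_async_tx_descriptor *txd;
 *
 *	txd = dmaengine_prep_slave_single(chan, buf_dma, len,
 *					  DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	txd->callback = my_done_callback;	// hypothetical completion hook
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */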
35
Srikanth Thokala9cd43602014-04-23 20:23:26 +053036#include <linux/bitops.h>
37#include <linux/dmapool.h>
Kedareswara rao Appana937abe82015-03-02 23:24:24 +053038#include <linux/dma/xilinx_dma.h>
Srikanth Thokala9cd43602014-04-23 20:23:26 +053039#include <linux/init.h>
40#include <linux/interrupt.h>
41#include <linux/io.h>
Kedareswara rao Appana9495f262016-02-26 19:33:54 +053042#include <linux/iopoll.h>
Srikanth Thokala9cd43602014-04-23 20:23:26 +053043#include <linux/module.h>
44#include <linux/of_address.h>
45#include <linux/of_dma.h>
46#include <linux/of_platform.h>
47#include <linux/of_irq.h>
48#include <linux/slab.h>
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +053049#include <linux/clk.h>
Kedareswara rao Appanaf0cba682016-06-07 19:21:15 +053050#include <linux/io-64-nonatomic-lo-hi.h>
Srikanth Thokala9cd43602014-04-23 20:23:26 +053051
52#include "../dmaengine.h"
53
54/* Register/Descriptor Offsets */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +053055#define XILINX_DMA_MM2S_CTRL_OFFSET 0x0000
56#define XILINX_DMA_S2MM_CTRL_OFFSET 0x0030
Srikanth Thokala9cd43602014-04-23 20:23:26 +053057#define XILINX_VDMA_MM2S_DESC_OFFSET 0x0050
58#define XILINX_VDMA_S2MM_DESC_OFFSET 0x00a0
59
60/* Control Registers */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +053061#define XILINX_DMA_REG_DMACR 0x0000
62#define XILINX_DMA_DMACR_DELAY_MAX 0xff
63#define XILINX_DMA_DMACR_DELAY_SHIFT 24
64#define XILINX_DMA_DMACR_FRAME_COUNT_MAX 0xff
65#define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT 16
66#define XILINX_DMA_DMACR_ERR_IRQ BIT(14)
67#define XILINX_DMA_DMACR_DLY_CNT_IRQ BIT(13)
68#define XILINX_DMA_DMACR_FRM_CNT_IRQ BIT(12)
69#define XILINX_DMA_DMACR_MASTER_SHIFT 8
70#define XILINX_DMA_DMACR_FSYNCSRC_SHIFT 5
71#define XILINX_DMA_DMACR_FRAMECNT_EN BIT(4)
72#define XILINX_DMA_DMACR_GENLOCK_EN BIT(3)
73#define XILINX_DMA_DMACR_RESET BIT(2)
74#define XILINX_DMA_DMACR_CIRC_EN BIT(1)
75#define XILINX_DMA_DMACR_RUNSTOP BIT(0)
76#define XILINX_DMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5)
Srikanth Thokala9cd43602014-04-23 20:23:26 +053077
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +053078#define XILINX_DMA_REG_DMASR 0x0004
79#define XILINX_DMA_DMASR_EOL_LATE_ERR BIT(15)
80#define XILINX_DMA_DMASR_ERR_IRQ BIT(14)
81#define XILINX_DMA_DMASR_DLY_CNT_IRQ BIT(13)
82#define XILINX_DMA_DMASR_FRM_CNT_IRQ BIT(12)
83#define XILINX_DMA_DMASR_SOF_LATE_ERR BIT(11)
84#define XILINX_DMA_DMASR_SG_DEC_ERR BIT(10)
85#define XILINX_DMA_DMASR_SG_SLV_ERR BIT(9)
86#define XILINX_DMA_DMASR_EOF_EARLY_ERR BIT(8)
87#define XILINX_DMA_DMASR_SOF_EARLY_ERR BIT(7)
88#define XILINX_DMA_DMASR_DMA_DEC_ERR BIT(6)
89#define XILINX_DMA_DMASR_DMA_SLAVE_ERR BIT(5)
90#define XILINX_DMA_DMASR_DMA_INT_ERR BIT(4)
Andrea Merello05f7ea72018-11-20 16:31:49 +010091#define XILINX_DMA_DMASR_SG_MASK BIT(3)
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +053092#define XILINX_DMA_DMASR_IDLE BIT(1)
93#define XILINX_DMA_DMASR_HALTED BIT(0)
94#define XILINX_DMA_DMASR_DELAY_MASK GENMASK(31, 24)
95#define XILINX_DMA_DMASR_FRAME_COUNT_MASK GENMASK(23, 16)
Srikanth Thokala9cd43602014-04-23 20:23:26 +053096
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +053097#define XILINX_DMA_REG_CURDESC 0x0008
98#define XILINX_DMA_REG_TAILDESC 0x0010
99#define XILINX_DMA_REG_REG_INDEX 0x0014
100#define XILINX_DMA_REG_FRMSTORE 0x0018
101#define XILINX_DMA_REG_THRESHOLD 0x001c
102#define XILINX_DMA_REG_FRMPTR_STS 0x0024
103#define XILINX_DMA_REG_PARK_PTR 0x0028
104#define XILINX_DMA_PARK_PTR_WR_REF_SHIFT 8
Kedareswara rao Appanafe0503e2017-12-07 10:51:03 +0530105#define XILINX_DMA_PARK_PTR_WR_REF_MASK GENMASK(12, 8)
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +0530106#define XILINX_DMA_PARK_PTR_RD_REF_SHIFT 0
Kedareswara rao Appanafe0503e2017-12-07 10:51:03 +0530107#define XILINX_DMA_PARK_PTR_RD_REF_MASK GENMASK(4, 0)
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +0530108#define XILINX_DMA_REG_VDMA_VERSION 0x002c
Srikanth Thokala9cd43602014-04-23 20:23:26 +0530109
110/* Register Direct Mode Registers */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +0530111#define XILINX_DMA_REG_VSIZE 0x0000
112#define XILINX_DMA_REG_HSIZE 0x0004
Srikanth Thokala9cd43602014-04-23 20:23:26 +0530113
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +0530114#define XILINX_DMA_REG_FRMDLY_STRIDE 0x0008
115#define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT 24
116#define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT 0
Srikanth Thokala9cd43602014-04-23 20:23:26 +0530117
118#define XILINX_VDMA_REG_START_ADDRESS(n) (0x000c + 4 * (n))
Kedareswara rao Appanab72db402016-04-06 10:38:08 +0530119#define XILINX_VDMA_REG_START_ADDRESS_64(n) (0x000c + 8 * (n))
Srikanth Thokala9cd43602014-04-23 20:23:26 +0530120
Radhey Shyam Pandey0894aa22018-06-13 13:04:48 +0530121#define XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP 0x00ec
122#define XILINX_VDMA_ENABLE_VERTICAL_FLIP BIT(0)
123
Srikanth Thokala9cd43602014-04-23 20:23:26 +0530124/* HW specific definitions */
Radhey Shyam Pandey6ccd6922019-10-22 22:30:22 +0530125#define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x20
Srikanth Thokala9cd43602014-04-23 20:23:26 +0530126
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +0530127#define XILINX_DMA_DMAXR_ALL_IRQ_MASK \
128 (XILINX_DMA_DMASR_FRM_CNT_IRQ | \
129 XILINX_DMA_DMASR_DLY_CNT_IRQ | \
130 XILINX_DMA_DMASR_ERR_IRQ)
Srikanth Thokala9cd43602014-04-23 20:23:26 +0530131
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +0530132#define XILINX_DMA_DMASR_ALL_ERR_MASK \
133 (XILINX_DMA_DMASR_EOL_LATE_ERR | \
134 XILINX_DMA_DMASR_SOF_LATE_ERR | \
135 XILINX_DMA_DMASR_SG_DEC_ERR | \
136 XILINX_DMA_DMASR_SG_SLV_ERR | \
137 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
138 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
139 XILINX_DMA_DMASR_DMA_DEC_ERR | \
140 XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
141 XILINX_DMA_DMASR_DMA_INT_ERR)
Srikanth Thokala9cd43602014-04-23 20:23:26 +0530142
143/*
144 * Recoverable errors are DMA Internal error, SOF Early, EOF Early
145 * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
146 * is enabled in the h/w system.
147 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +0530148#define XILINX_DMA_DMASR_ERR_RECOVER_MASK \
149 (XILINX_DMA_DMASR_SOF_LATE_ERR | \
150 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
151 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
152 XILINX_DMA_DMASR_DMA_INT_ERR)
Srikanth Thokala9cd43602014-04-23 20:23:26 +0530153
154/* Axi VDMA Flush on Fsync bits */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +0530155#define XILINX_DMA_FLUSH_S2MM 3
156#define XILINX_DMA_FLUSH_MM2S 2
157#define XILINX_DMA_FLUSH_BOTH 1
Srikanth Thokala9cd43602014-04-23 20:23:26 +0530158
159/* Delay loop counter to prevent hardware failure */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +0530160#define XILINX_DMA_LOOP_COUNT 1000000
Srikanth Thokala9cd43602014-04-23 20:23:26 +0530161
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +0530162/* AXI DMA Specific Registers/Offsets */
163#define XILINX_DMA_REG_SRCDSTADDR 0x18
164#define XILINX_DMA_REG_BTT 0x28
165
166/* AXI DMA Specific Masks/Bit fields */
Radhey Shyam Pandeyae809692018-11-20 16:31:48 +0100167#define XILINX_DMA_MAX_TRANS_LEN_MIN 8
168#define XILINX_DMA_MAX_TRANS_LEN_MAX 23
169#define XILINX_DMA_V2_MAX_TRANS_LEN_MAX 26
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +0530170#define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16)
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +0530171#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4)
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +0530172#define XILINX_DMA_CR_COALESCE_SHIFT 16
173#define XILINX_DMA_BD_SOP BIT(27)
174#define XILINX_DMA_BD_EOP BIT(26)
175#define XILINX_DMA_COALESCE_MAX 255
Kedareswara rao Appana23059402017-12-07 10:51:04 +0530176#define XILINX_DMA_NUM_DESCS 255
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +0530177#define XILINX_DMA_NUM_APP_WORDS 5
178
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +0530179/* AXI CDMA Specific Registers/Offsets */
180#define XILINX_CDMA_REG_SRCADDR 0x18
181#define XILINX_CDMA_REG_DSTADDR 0x20
182
183/* AXI CDMA Specific Masks */
184#define XILINX_CDMA_CR_SGMODE BIT(3)
Srikanth Thokala9cd43602014-04-23 20:23:26 +0530185
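/*
 * xilinx_prep_dma_addr_t - build a dma_addr_t from an LSB/MSB variable pair.
 * Given a variable "foo" with a matching "foo_msb" in the caller's scope,
 * xilinx_prep_dma_addr_t(foo) expands to
 * ((dma_addr_t)((u64)foo_msb << 32 | (foo))).
 */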
#define xilinx_prep_dma_addr_t(addr)	\
	((dma_addr_t)((u64)addr##_##msb << 32 | (addr)))

/* AXI MCDMA Specific Registers/Offsets */
#define XILINX_MCDMA_MM2S_CTRL_OFFSET		0x0000
#define XILINX_MCDMA_S2MM_CTRL_OFFSET		0x0500
#define XILINX_MCDMA_CHEN_OFFSET		0x0008
#define XILINX_MCDMA_CH_ERR_OFFSET		0x0010
#define XILINX_MCDMA_RXINT_SER_OFFSET		0x0020
#define XILINX_MCDMA_TXINT_SER_OFFSET		0x0028
#define XILINX_MCDMA_CHAN_CR_OFFSET(x)		(0x40 + (x) * 0x40)
#define XILINX_MCDMA_CHAN_SR_OFFSET(x)		(0x44 + (x) * 0x40)
#define XILINX_MCDMA_CHAN_CDESC_OFFSET(x)	(0x48 + (x) * 0x40)
#define XILINX_MCDMA_CHAN_TDESC_OFFSET(x)	(0x50 + (x) * 0x40)

/* AXI MCDMA Specific Masks/Shifts */
#define XILINX_MCDMA_COALESCE_SHIFT		16
#define XILINX_MCDMA_COALESCE_MAX		24
#define XILINX_MCDMA_IRQ_ALL_MASK		GENMASK(7, 5)
#define XILINX_MCDMA_COALESCE_MASK		GENMASK(23, 16)
#define XILINX_MCDMA_CR_RUNSTOP_MASK		BIT(0)
#define XILINX_MCDMA_IRQ_IOC_MASK		BIT(5)
#define XILINX_MCDMA_IRQ_DELAY_MASK		BIT(6)
#define XILINX_MCDMA_IRQ_ERR_MASK		BIT(7)
#define XILINX_MCDMA_BD_EOP			BIT(30)
#define XILINX_MCDMA_BD_SOP			BIT(31)

/**
 * struct xilinx_vdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @pad1: Reserved @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @vsize: Vertical Size @0x10
 * @hsize: Horizontal Size @0x14
 * @stride: Number of bytes between the first
 *	    pixels of each horizontal line @0x18
 */
struct xilinx_vdma_desc_hw {
	u32 next_desc;
	u32 pad1;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 vsize;
	u32 hsize;
	u32 stride;
} __aligned(64);

/**
 * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @reserved1: Reserved @0x10
 * @reserved2: Reserved @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 * @app: APP Fields @0x20 - 0x30
 */
struct xilinx_axidma_desc_hw {
	u32 next_desc;
	u32 next_desc_msb;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 reserved1;
	u32 reserved2;
	u32 control;
	u32 status;
	u32 app[XILINX_DMA_NUM_APP_WORDS];
} __aligned(64);

/**
 * struct xilinx_aximcdma_desc_hw - Hardware Descriptor for AXI MCDMA
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @rsvd: Reserved field @0x10
 * @control: Control Information field @0x14
 * @status: Status field @0x18
 * @sideband_status: Status of sideband signals @0x1C
 * @app: APP Fields @0x20 - 0x30
 */
struct xilinx_aximcdma_desc_hw {
	u32 next_desc;
	u32 next_desc_msb;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 rsvd;
	u32 control;
	u32 status;
	u32 sideband_status;
	u32 app[XILINX_DMA_NUM_APP_WORDS];
} __aligned(64);

/**
 * struct xilinx_cdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: Next Descriptor Pointer MSB @0x04
 * @src_addr: Source address @0x08
 * @src_addr_msb: Source address MSB @0x0C
 * @dest_addr: Destination address @0x10
 * @dest_addr_msb: Destination address MSB @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 */
struct xilinx_cdma_desc_hw {
	u32 next_desc;
	u32 next_desc_msb;
	u32 src_addr;
	u32 src_addr_msb;
	u32 dest_addr;
	u32 dest_addr_msb;
	u32 control;
	u32 status;
} __aligned(64);

/**
 * struct xilinx_vdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_vdma_tx_segment {
	struct xilinx_vdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_axidma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_axidma_tx_segment {
	struct xilinx_axidma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_aximcdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_aximcdma_tx_segment {
	struct xilinx_aximcdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_cdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_cdma_tx_segment {
	struct xilinx_cdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_dma_tx_descriptor - Per Transaction structure
 * @async_tx: Async transaction descriptor
 * @segments: TX segments list
 * @node: Node in the channel descriptors list
 * @cyclic: Check for cyclic transfers.
 * @err: Whether the descriptor has an error.
 * @residue: Residue of the completed descriptor
 */
struct xilinx_dma_tx_descriptor {
	struct dma_async_tx_descriptor async_tx;
	struct list_head segments;
	struct list_head node;
	bool cyclic;
	bool err;
	u32 residue;
};

/**
 * struct xilinx_dma_chan - Driver specific DMA channel structure
 * @xdev: Driver specific device structure
 * @ctrl_offset: Control registers offset
 * @desc_offset: TX descriptor registers offset
 * @lock: Descriptor operation lock
 * @pending_list: Descriptors waiting
 * @active_list: Descriptors ready to submit
 * @done_list: Complete descriptors
 * @free_seg_list: Free descriptors
 * @common: DMA common channel
 * @desc_pool: Descriptors pool
 * @dev: The dma device
 * @irq: Channel IRQ
 * @id: Channel ID
 * @direction: Transfer direction
 * @num_frms: Number of frames
 * @has_sg: Support scatter transfers
 * @cyclic: Check for cyclic transfers.
 * @genlock: Support genlock mode
 * @err: Channel has errors
 * @idle: Check for channel idle
 * @tasklet: Cleanup work after irq
 * @config: Device configuration info
 * @flush_on_fsync: Flush on Frame sync
 * @desc_pendingcount: Descriptor pending count
 * @ext_addr: Indicates 64 bit addressing is supported by dma channel
 * @desc_submitcount: Descriptor h/w submitted count
 * @seg_v: Statically allocated segments base
 * @seg_mv: Statically allocated segments base for MCDMA
 * @seg_p: Physical allocated segments base
 * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
 * @cyclic_seg_p: Physical allocated segments base for cyclic dma
 * @start_transfer: Differentiate b/w DMA IP's transfer
 * @stop_transfer: Differentiate b/w DMA IP's quiesce
 * @tdest: TDEST value for mcdma
 * @has_vflip: S2MM vertical flip
 */
struct xilinx_dma_chan {
	struct xilinx_dma_device *xdev;
	u32 ctrl_offset;
	u32 desc_offset;
	spinlock_t lock;
	struct list_head pending_list;
	struct list_head active_list;
	struct list_head done_list;
	struct list_head free_seg_list;
	struct dma_chan common;
	struct dma_pool *desc_pool;
	struct device *dev;
	int irq;
	int id;
	enum dma_transfer_direction direction;
	int num_frms;
	bool has_sg;
	bool cyclic;
	bool genlock;
	bool err;
	bool idle;
	struct tasklet_struct tasklet;
	struct xilinx_vdma_config config;
	bool flush_on_fsync;
	u32 desc_pendingcount;
	bool ext_addr;
	u32 desc_submitcount;
	struct xilinx_axidma_tx_segment *seg_v;
	struct xilinx_aximcdma_tx_segment *seg_mv;
	dma_addr_t seg_p;
	struct xilinx_axidma_tx_segment *cyclic_seg_v;
	dma_addr_t cyclic_seg_p;
	void (*start_transfer)(struct xilinx_dma_chan *chan);
	int (*stop_transfer)(struct xilinx_dma_chan *chan);
	u16 tdest;
	bool has_vflip;
};

/**
 * enum xdma_ip_type - DMA IP type.
 *
 * @XDMA_TYPE_AXIDMA: Axi dma ip.
 * @XDMA_TYPE_CDMA: Axi cdma ip.
 * @XDMA_TYPE_VDMA: Axi vdma ip.
 * @XDMA_TYPE_AXIMCDMA: Axi MCDMA ip.
 *
 */
enum xdma_ip_type {
	XDMA_TYPE_AXIDMA = 0,
	XDMA_TYPE_CDMA,
	XDMA_TYPE_VDMA,
	XDMA_TYPE_AXIMCDMA
};

struct xilinx_dma_config {
	enum xdma_ip_type dmatype;
	int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
			struct clk **tx_clk, struct clk **txs_clk,
			struct clk **rx_clk, struct clk **rxs_clk);
	irqreturn_t (*irq_handler)(int irq, void *data);
};

/**
 * struct xilinx_dma_device - DMA device structure
 * @regs: I/O mapped base address
 * @dev: Device Structure
 * @common: DMA device structure
 * @chan: Driver specific DMA channel
 * @flush_on_fsync: Flush on frame sync
 * @ext_addr: Indicates 64 bit addressing is supported by dma device
 * @pdev: Platform device structure pointer
 * @dma_config: DMA config structure
 * @axi_clk: DMA Axi4-lite interface clock
 * @tx_clk: DMA mm2s clock
 * @txs_clk: DMA mm2s stream clock
 * @rx_clk: DMA s2mm clock
 * @rxs_clk: DMA s2mm stream clock
 * @nr_channels: Number of channels DMA device supports
 * @chan_id: DMA channel identifier
 * @max_buffer_len: Max buffer length
 * @s2mm_index: S2MM channel index
 */
struct xilinx_dma_device {
	void __iomem *regs;
	struct device *dev;
	struct dma_device common;
	struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
	u32 flush_on_fsync;
	bool ext_addr;
	struct platform_device *pdev;
	const struct xilinx_dma_config *dma_config;
	struct clk *axi_clk;
	struct clk *tx_clk;
	struct clk *txs_clk;
	struct clk *rx_clk;
	struct clk *rxs_clk;
	u32 nr_channels;
	u32 chan_id;
	u32 max_buffer_len;
	u32 s2mm_index;
};

/* Macros */
#define to_xilinx_chan(chan) \
	container_of(chan, struct xilinx_dma_chan, common)
#define to_dma_tx_descriptor(tx) \
	container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
#define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
	readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
			   cond, delay_us, timeout_us)

/* IO accessors */
static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
{
	return ioread32(chan->xdev->regs + reg);
}

static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
{
	iowrite32(value, chan->xdev->regs + reg);
}

static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
				   u32 value)
{
	dma_write(chan, chan->desc_offset + reg, value);
}

static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
{
	return dma_read(chan, chan->ctrl_offset + reg);
}

static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
				  u32 value)
{
	dma_write(chan, chan->ctrl_offset + reg, value);
}

static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
				u32 clr)
{
	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
}

static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
				u32 set)
{
	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
}

/**
 * vdma_desc_write_64 - 64-bit descriptor write
 * @chan: Driver specific VDMA channel
 * @reg: Register to write
 * @value_lsb: lower address of the descriptor.
 * @value_msb: upper address of the descriptor.
 *
 * Since the VDMA driver writes to register offsets that are not 64-bit
 * aligned (e.g. 0x5c), the value is written as two separate 32-bit writes
 * instead of a single 64-bit register write.
 */
static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
				      u32 value_lsb, u32 value_msb)
{
	/* Write the lsb 32 bits */
	writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);

	/* Write the msb 32 bits */
	writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
}

static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
{
	lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg);
}

static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg,
				dma_addr_t addr)
{
	if (chan->ext_addr)
		dma_writeq(chan, reg, addr);
	else
		dma_ctrl_write(chan, reg, addr);
}

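/*
 * Program the buffer address into an AXI DMA hardware descriptor. When the
 * channel was synthesized with extended (>32 bit) addressing, the address is
 * split across the LSB and MSB descriptor words; otherwise only the 32-bit
 * word is used.
 */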
static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
				     struct xilinx_axidma_desc_hw *hw,
				     dma_addr_t buf_addr, size_t sg_used,
				     size_t period_len)
{
	if (chan->ext_addr) {
		hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len);
		hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used +
						 period_len);
	} else {
		hw->buf_addr = buf_addr + sg_used + period_len;
	}
}

static inline void xilinx_aximcdma_buf(struct xilinx_dma_chan *chan,
				       struct xilinx_aximcdma_desc_hw *hw,
				       dma_addr_t buf_addr, size_t sg_used)
{
	if (chan->ext_addr) {
		hw->buf_addr = lower_32_bits(buf_addr + sg_used);
		hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used);
	} else {
		hw->buf_addr = buf_addr + sg_used;
	}
}

/* -----------------------------------------------------------------------------
 * Descriptors and segments alloc and free
 */

/**
 * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_vdma_tx_segment *
xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_vdma_tx_segment *segment;
	dma_addr_t phys;

	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	segment->phys = phys;

	return segment;
}

/**
 * xilinx_cdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_cdma_tx_segment *
xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_cdma_tx_segment *segment;
	dma_addr_t phys;

	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	segment->phys = phys;

	return segment;
}

/**
 * xilinx_axidma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_axidma_tx_segment *
xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_axidma_tx_segment *segment = NULL;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (!list_empty(&chan->free_seg_list)) {
		segment = list_first_entry(&chan->free_seg_list,
					   struct xilinx_axidma_tx_segment,
					   node);
		list_del(&segment->node);
	}
	spin_unlock_irqrestore(&chan->lock, flags);

	if (!segment)
		dev_dbg(chan->dev, "Could not find free tx segment\n");

	return segment;
}

/**
 * xilinx_aximcdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_aximcdma_tx_segment *
xilinx_aximcdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_aximcdma_tx_segment *segment = NULL;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (!list_empty(&chan->free_seg_list)) {
		segment = list_first_entry(&chan->free_seg_list,
					   struct xilinx_aximcdma_tx_segment,
					   node);
		list_del(&segment->node);
	}
	spin_unlock_irqrestore(&chan->lock, flags);

	return segment;
}

static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw)
{
	u32 next_desc = hw->next_desc;
	u32 next_desc_msb = hw->next_desc_msb;

	memset(hw, 0, sizeof(struct xilinx_axidma_desc_hw));

	hw->next_desc = next_desc;
	hw->next_desc_msb = next_desc_msb;
}

static void xilinx_mcdma_clean_hw_desc(struct xilinx_aximcdma_desc_hw *hw)
{
	u32 next_desc = hw->next_desc;
	u32 next_desc_msb = hw->next_desc_msb;

	memset(hw, 0, sizeof(struct xilinx_aximcdma_desc_hw));

	hw->next_desc = next_desc;
	hw->next_desc_msb = next_desc_msb;
}

/**
 * xilinx_dma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
					struct xilinx_axidma_tx_segment *segment)
{
	xilinx_dma_clean_hw_desc(&segment->hw);

	list_add_tail(&segment->node, &chan->free_seg_list);
}

/**
 * xilinx_mcdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_mcdma_free_tx_segment(struct xilinx_dma_chan *chan,
					 struct xilinx_aximcdma_tx_segment *
					 segment)
{
	xilinx_mcdma_clean_hw_desc(&segment->hw);

	list_add_tail(&segment->node, &chan->free_seg_list);
}

/**
 * xilinx_cdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
					struct xilinx_cdma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}

/**
 * xilinx_vdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
					struct xilinx_vdma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}

/**
 * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated descriptor on success and NULL on failure.
 */
static struct xilinx_dma_tx_descriptor *
xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return NULL;

	INIT_LIST_HEAD(&desc->segments);

	return desc;
}

/**
 * xilinx_dma_free_tx_descriptor - Free transaction descriptor
 * @chan: Driver specific DMA channel
 * @desc: DMA transaction descriptor
 */
static void
xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
			      struct xilinx_dma_tx_descriptor *desc)
{
	struct xilinx_vdma_tx_segment *segment, *next;
	struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
	struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;
	struct xilinx_aximcdma_tx_segment *aximcdma_segment, *aximcdma_next;

	if (!desc)
		return;

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		list_for_each_entry_safe(segment, next, &desc->segments, node) {
			list_del(&segment->node);
			xilinx_vdma_free_tx_segment(chan, segment);
		}
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		list_for_each_entry_safe(cdma_segment, cdma_next,
					 &desc->segments, node) {
			list_del(&cdma_segment->node);
			xilinx_cdma_free_tx_segment(chan, cdma_segment);
		}
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		list_for_each_entry_safe(axidma_segment, axidma_next,
					 &desc->segments, node) {
			list_del(&axidma_segment->node);
			xilinx_dma_free_tx_segment(chan, axidma_segment);
		}
	} else {
		list_for_each_entry_safe(aximcdma_segment, aximcdma_next,
					 &desc->segments, node) {
			list_del(&aximcdma_segment->node);
			xilinx_mcdma_free_tx_segment(chan, aximcdma_segment);
		}
	}

	kfree(desc);
}

/* Required functions */

/**
 * xilinx_dma_free_desc_list - Free descriptors list
 * @chan: Driver specific DMA channel
 * @list: List to parse and delete the descriptor
 */
static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
				      struct list_head *list)
{
	struct xilinx_dma_tx_descriptor *desc, *next;

	list_for_each_entry_safe(desc, next, list, node) {
		list_del(&desc->node);
		xilinx_dma_free_tx_descriptor(chan, desc);
	}
}

/**
 * xilinx_dma_free_descriptors - Free channel descriptors
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	xilinx_dma_free_desc_list(chan, &chan->pending_list);
	xilinx_dma_free_desc_list(chan, &chan->done_list);
	xilinx_dma_free_desc_list(chan, &chan->active_list);

	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_dma_free_chan_resources - Free channel resources
 * @dchan: DMA channel
 */
static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	unsigned long flags;

	dev_dbg(chan->dev, "Free all channel resources.\n");

	xilinx_dma_free_descriptors(chan);

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		spin_lock_irqsave(&chan->lock, flags);
		INIT_LIST_HEAD(&chan->free_seg_list);
		spin_unlock_irqrestore(&chan->lock, flags);

		/* Free memory that is allocated for BD */
		dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
				  XILINX_DMA_NUM_DESCS, chan->seg_v,
				  chan->seg_p);

		/* Free memory that is allocated for cyclic DMA mode */
		dma_free_coherent(chan->dev, sizeof(*chan->cyclic_seg_v),
				  chan->cyclic_seg_v, chan->cyclic_seg_p);
	}

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
		spin_lock_irqsave(&chan->lock, flags);
		INIT_LIST_HEAD(&chan->free_seg_list);
		spin_unlock_irqrestore(&chan->lock, flags);

		/* Free memory that is allocated for BD */
		dma_free_coherent(chan->dev, sizeof(*chan->seg_mv) *
				  XILINX_DMA_NUM_DESCS, chan->seg_mv,
				  chan->seg_p);
	}

	if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA &&
	    chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIMCDMA) {
		dma_pool_destroy(chan->desc_pool);
		chan->desc_pool = NULL;
	}
}

/**
 * xilinx_dma_get_residue - Compute residue for a given descriptor
 * @chan: Driver specific dma channel
 * @desc: dma transaction descriptor
 *
 * Return: The number of residue bytes for the descriptor.
 */
static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan,
				  struct xilinx_dma_tx_descriptor *desc)
{
	struct xilinx_cdma_tx_segment *cdma_seg;
	struct xilinx_axidma_tx_segment *axidma_seg;
	struct xilinx_cdma_desc_hw *cdma_hw;
	struct xilinx_axidma_desc_hw *axidma_hw;
	struct list_head *entry;
	u32 residue = 0;

	list_for_each(entry, &desc->segments) {
		if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
			cdma_seg = list_entry(entry,
					      struct xilinx_cdma_tx_segment,
					      node);
			cdma_hw = &cdma_seg->hw;
			residue += (cdma_hw->control - cdma_hw->status) &
				   chan->xdev->max_buffer_len;
		} else {
			axidma_seg = list_entry(entry,
						struct xilinx_axidma_tx_segment,
						node);
			axidma_hw = &axidma_seg->hw;
			residue += (axidma_hw->control - axidma_hw->status) &
				   chan->xdev->max_buffer_len;
		}
	}

	return residue;
}

/**
 * xilinx_dma_chan_handle_cyclic - Cyclic dma callback
 * @chan: Driver specific dma channel
 * @desc: dma transaction descriptor
 * @flags: flags for spin lock
 */
static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
					  struct xilinx_dma_tx_descriptor *desc,
					  unsigned long *flags)
{
	dma_async_tx_callback callback;
	void *callback_param;

	callback = desc->async_tx.callback;
	callback_param = desc->async_tx.callback_param;
	if (callback) {
		spin_unlock_irqrestore(&chan->lock, *flags);
		callback(callback_param);
		spin_lock_irqsave(&chan->lock, *flags);
	}
}

/**
 * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc, *next;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	list_for_each_entry_safe(desc, next, &chan->done_list, node) {
		struct dmaengine_result result;

		if (desc->cyclic) {
			xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
			break;
		}

		/* Remove from the list of running transactions */
		list_del(&desc->node);

		if (unlikely(desc->err)) {
			if (chan->direction == DMA_DEV_TO_MEM)
				result.result = DMA_TRANS_READ_FAILED;
			else
				result.result = DMA_TRANS_WRITE_FAILED;
		} else {
			result.result = DMA_TRANS_NOERROR;
		}

		result.residue = desc->residue;

		/* Run the link descriptor callback function */
		spin_unlock_irqrestore(&chan->lock, flags);
		dmaengine_desc_get_callback_invoke(&desc->async_tx, &result);
		spin_lock_irqsave(&chan->lock, flags);

		/* Run any dependencies, then free the descriptor */
		dma_run_dependencies(&desc->async_tx);
		xilinx_dma_free_tx_descriptor(chan, desc);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_dma_do_tasklet - Schedule completion tasklet
 * @data: Pointer to the Xilinx DMA channel structure
 */
static void xilinx_dma_do_tasklet(unsigned long data)
{
	struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data;

	xilinx_dma_chan_desc_cleanup(chan);
}

/**
 * xilinx_dma_alloc_chan_resources - Allocate channel resources
 * @dchan: DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	int i;

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 0;

	/*
	 * The descriptors must be aligned to 64 bytes to meet the
	 * Xilinx VDMA specification requirement.
	 */
	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		/* Allocate the buffer descriptors. */
		chan->seg_v = dma_alloc_coherent(chan->dev,
						 sizeof(*chan->seg_v) * XILINX_DMA_NUM_DESCS,
						 &chan->seg_p, GFP_KERNEL);
		if (!chan->seg_v) {
			dev_err(chan->dev,
				"unable to allocate channel %d descriptors\n",
				chan->id);
			return -ENOMEM;
		}
		/*
		 * In cyclic DMA mode the tail descriptor register must be
		 * programmed with a value that is not part of the BD chain,
		 * so allocate a separate descriptor segment at channel
		 * allocation time for programming the tail descriptor.
		 */
		chan->cyclic_seg_v = dma_alloc_coherent(chan->dev,
							sizeof(*chan->cyclic_seg_v),
							&chan->cyclic_seg_p,
							GFP_KERNEL);
		if (!chan->cyclic_seg_v) {
			dev_err(chan->dev,
				"unable to allocate desc segment for cyclic DMA\n");
			dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
					  XILINX_DMA_NUM_DESCS, chan->seg_v,
					  chan->seg_p);
			return -ENOMEM;
		}
		chan->cyclic_seg_v->phys = chan->cyclic_seg_p;

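		/*
		 * Pre-link the statically allocated descriptors into a ring:
		 * each BD's next_desc points at the bus address of the
		 * following BD (wrapping at the end), and every segment is
		 * placed on the channel's free list.
		 */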
		for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
			chan->seg_v[i].hw.next_desc =
			lower_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
				((i + 1) % XILINX_DMA_NUM_DESCS));
			chan->seg_v[i].hw.next_desc_msb =
			upper_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
				((i + 1) % XILINX_DMA_NUM_DESCS));
			chan->seg_v[i].phys = chan->seg_p +
				sizeof(*chan->seg_v) * i;
			list_add_tail(&chan->seg_v[i].node,
				      &chan->free_seg_list);
		}
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
		/* Allocate the buffer descriptors. */
		chan->seg_mv = dma_alloc_coherent(chan->dev,
						  sizeof(*chan->seg_mv) *
						  XILINX_DMA_NUM_DESCS,
						  &chan->seg_p, GFP_KERNEL);
		if (!chan->seg_mv) {
			dev_err(chan->dev,
				"unable to allocate channel %d descriptors\n",
				chan->id);
			return -ENOMEM;
		}
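		/* Link the MCDMA descriptors into the same kind of ring. */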
1126 for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
1127 chan->seg_mv[i].hw.next_desc =
1128 lower_32_bits(chan->seg_p + sizeof(*chan->seg_mv) *
1129 ((i + 1) % XILINX_DMA_NUM_DESCS));
1130 chan->seg_mv[i].hw.next_desc_msb =
1131 upper_32_bits(chan->seg_p + sizeof(*chan->seg_mv) *
1132 ((i + 1) % XILINX_DMA_NUM_DESCS));
1133 chan->seg_mv[i].phys = chan->seg_p +
1134 sizeof(*chan->seg_v) * i;
1135 list_add_tail(&chan->seg_mv[i].node,
1136 &chan->free_seg_list);
1137 }
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05301138 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05301139 chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
1140 chan->dev,
1141 sizeof(struct xilinx_cdma_tx_segment),
1142 __alignof__(struct xilinx_cdma_tx_segment),
1143 0);
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301144 } else {
1145 chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
1146 chan->dev,
1147 sizeof(struct xilinx_vdma_tx_segment),
1148 __alignof__(struct xilinx_vdma_tx_segment),
1149 0);
1150 }
1151
Kedareswara rao Appana23059402017-12-07 10:51:04 +05301152 if (!chan->desc_pool &&
Radhey Shyam Pandey6ccd6922019-10-22 22:30:22 +05301153 ((chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) &&
1154 chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIMCDMA)) {
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301155 dev_err(chan->dev,
1156 "unable to allocate channel %d descriptor pool\n",
1157 chan->id);
1158 return -ENOMEM;
1159 }
1160
1161 dma_cookie_init(dchan);
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301162
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05301163 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301164 /* For AXI DMA resetting once channel will reset the
1165 * other channel as well so enable the interrupts here.
1166 */
1167 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1168 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1169 }
1170
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05301171 if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05301172 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1173 XILINX_CDMA_CR_SGMODE);
1174
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301175 return 0;
1176}
1177
1178/**
Andrea Merello616f0f82018-11-20 16:31:45 +01001179 * xilinx_dma_calc_copysize - Calculate the amount of data to copy
1180 * @chan: Driver specific DMA channel
1181 * @size: Total data that needs to be copied
1182  * @done: Amount of data that has already been copied
1183 *
1184 * Return: Amount of data that has to be copied
1185 */
1186static int xilinx_dma_calc_copysize(struct xilinx_dma_chan *chan,
1187 int size, int done)
1188{
1189 size_t copy;
1190
1191 copy = min_t(size_t, size - done,
1192 chan->xdev->max_buffer_len);
1193
Andrea Merello5c094d42018-11-20 16:31:46 +01001194 if ((copy + done < size) &&
1195 chan->xdev->common.copy_align) {
1196 /*
1197 * If this is not the last descriptor, make sure
1198 * the next one will be properly aligned
1199 */
1200 copy = rounddown(copy,
1201 (1 << chan->xdev->common.copy_align));
1202 }
Andrea Merello616f0f82018-11-20 16:31:45 +01001203 return copy;
1204}
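/*
 * Illustrative sketch (not part of the driver): this is how the prep
 * callbacks below typically consume the helper above when splitting one
 * buffer across several descriptors.  The name "demo_split_buffer" is
 * hypothetical; only the loop shape matters.  With, say, a 60-byte
 * max_buffer_len, copy_align = 3 and a 100-byte buffer, the first chunk is
 * clamped to 60 bytes and then rounded down to 56 so the next chunk starts
 * 8-byte aligned.
 */
static void demo_split_buffer(struct xilinx_dma_chan *chan, int size)
{
	int done = 0;

	while (done < size) {
		int copy = xilinx_dma_calc_copysize(chan, size, done);

		/* ...program one descriptor covering 'copy' bytes here... */
		done += copy;
	}
}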
1205
1206/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301207 * xilinx_dma_tx_status - Get DMA transaction status
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301208 * @dchan: DMA channel
1209 * @cookie: Transaction identifier
1210 * @txstate: Transaction state
1211 *
1212 * Return: DMA transaction status
1213 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301214static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301215 dma_cookie_t cookie,
1216 struct dma_tx_state *txstate)
1217{
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301218 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1219 struct xilinx_dma_tx_descriptor *desc;
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301220 enum dma_status ret;
1221 unsigned long flags;
1222 u32 residue = 0;
1223
1224 ret = dma_cookie_status(dchan, cookie, txstate);
1225 if (ret == DMA_COMPLETE || !txstate)
1226 return ret;
1227
Nicholas Graumanna575d0b2019-10-15 20:18:21 +05301228 spin_lock_irqsave(&chan->lock, flags);
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301229
Nicholas Graumanna575d0b2019-10-15 20:18:21 +05301230 desc = list_last_entry(&chan->active_list,
1231 struct xilinx_dma_tx_descriptor, node);
1232 /*
1233 * VDMA and simple mode do not support residue reporting, so the
1234 * residue field will always be 0.
1235 */
1236 if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA)
1237 residue = xilinx_dma_get_residue(chan, desc);
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301238
Nicholas Graumanna575d0b2019-10-15 20:18:21 +05301239 spin_unlock_irqrestore(&chan->lock, flags);
1240
1241 dma_set_residue(txstate, residue);
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301242
1243 return ret;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301244}
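/*
 * Hedged client-side sketch (illustration only, not part of the driver):
 * a dmaengine consumer reaches the status callback above through
 * dmaengine_tx_status().  For AXI DMA/CDMA/MCDMA scatter-gather channels
 * the residue computed above shows up in state.residue; VDMA and simple
 * mode always report 0.  The function name and message are hypothetical.
 */
static void example_check_progress(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);
	if (status == DMA_IN_PROGRESS)
		pr_info("transfer in flight, residue %u bytes\n",
			state.residue);
}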
1245
1246/**
Akinobu Mita676f9c22017-03-14 00:59:11 +09001247 * xilinx_dma_stop_transfer - Halt DMA channel
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301248 * @chan: Driver specific DMA channel
Kedareswara rao Appanae50a0ad2017-12-07 10:51:05 +05301249 *
1250 * Return: '0' on success and failure value on error
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301251 */
Akinobu Mita676f9c22017-03-14 00:59:11 +09001252static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301253{
Kedareswara rao Appana9495f262016-02-26 19:33:54 +05301254 u32 val;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301255
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301256 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301257
1258 /* Wait for the hardware to halt */
Akinobu Mita676f9c22017-03-14 00:59:11 +09001259 return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1260 val & XILINX_DMA_DMASR_HALTED, 0,
1261 XILINX_DMA_LOOP_COUNT);
1262}
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301263
Akinobu Mita676f9c22017-03-14 00:59:11 +09001264/**
1265 * xilinx_cdma_stop_transfer - Wait for the current transfer to complete
1266 * @chan: Driver specific DMA channel
Kedareswara rao Appanae50a0ad2017-12-07 10:51:05 +05301267 *
1268 * Return: '0' on success and failure value on error
Akinobu Mita676f9c22017-03-14 00:59:11 +09001269 */
1270static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan)
1271{
1272 u32 val;
1273
1274 return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1275 val & XILINX_DMA_DMASR_IDLE, 0,
1276 XILINX_DMA_LOOP_COUNT);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301277}
1278
1279/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301280 * xilinx_dma_start - Start DMA channel
1281 * @chan: Driver specific DMA channel
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301282 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301283static void xilinx_dma_start(struct xilinx_dma_chan *chan)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301284{
Kedareswara rao Appana69490632016-03-03 23:02:42 +05301285 int err;
Kedareswara rao Appana9495f262016-02-26 19:33:54 +05301286 u32 val;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301287
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301288 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301289
1290 /* Wait for the hardware to start */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301291 err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1292 !(val & XILINX_DMA_DMASR_HALTED), 0,
1293 XILINX_DMA_LOOP_COUNT);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301294
Kedareswara rao Appana9495f262016-02-26 19:33:54 +05301295 if (err) {
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301296 dev_err(chan->dev, "Cannot start channel %p: %x\n",
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301297 chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301298
1299 chan->err = true;
1300 }
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301301}
1302
1303/**
1304 * xilinx_vdma_start_transfer - Starts VDMA transfer
1305 * @chan: Driver specific channel struct pointer
1306 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301307static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301308{
1309 struct xilinx_vdma_config *config = &chan->config;
Vinod Koulf935d7d2019-05-21 19:36:44 +05301310 struct xilinx_dma_tx_descriptor *desc;
Kedareswara rao Appanafe0503e2017-12-07 10:51:03 +05301311 u32 reg, j;
Andrea Merellob8349172018-11-20 16:31:51 +01001312 struct xilinx_vdma_tx_segment *segment, *last = NULL;
1313 int i = 0;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301314
Kedareswara rao Appana26c5e362016-02-26 19:33:52 +05301315 /* This function was invoked with lock held */
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301316 if (chan->err)
1317 return;
1318
Kedareswara rao Appana21e02a32017-12-07 10:51:02 +05301319 if (!chan->idle)
1320 return;
1321
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301322 if (list_empty(&chan->pending_list))
Kedareswara rao Appana26c5e362016-02-26 19:33:52 +05301323 return;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301324
1325 desc = list_first_entry(&chan->pending_list,
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301326 struct xilinx_dma_tx_descriptor, node);
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05301327
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301328 /* Configure the hardware using info in the config structure */
Radhey Shyam Pandey0894aa22018-06-13 13:04:48 +05301329 if (chan->has_vflip) {
1330 reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP);
1331 reg &= ~XILINX_VDMA_ENABLE_VERTICAL_FLIP;
1332 reg |= config->vflip_en;
1333 dma_write(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP,
1334 reg);
1335 }
1336
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301337 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301338
1339 if (config->frm_cnt_en)
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301340 reg |= XILINX_DMA_DMACR_FRAMECNT_EN;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301341 else
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301342 reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301343
Andrea Merellob8349172018-11-20 16:31:51 +01001344 /* If not parking, enable circular mode */
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301345 if (config->park)
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301346 reg &= ~XILINX_DMA_DMACR_CIRC_EN;
Andrea Merellob8349172018-11-20 16:31:51 +01001347 else
1348 reg |= XILINX_DMA_DMACR_CIRC_EN;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301349
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301350 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301351
Kedareswara rao Appanafe0503e2017-12-07 10:51:03 +05301352 j = chan->desc_submitcount;
1353 reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
1354 if (chan->direction == DMA_MEM_TO_DEV) {
1355 reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
1356 reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
1357 } else {
1358 reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
1359 reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301360 }
Kedareswara rao Appanafe0503e2017-12-07 10:51:03 +05301361 dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301362
1363 /* Start the hardware */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301364 xilinx_dma_start(chan);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301365
1366 if (chan->err)
Kedareswara rao Appana26c5e362016-02-26 19:33:52 +05301367 return;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301368
1369 /* Start the transfer */
Andrea Merellob8349172018-11-20 16:31:51 +01001370 if (chan->desc_submitcount < chan->num_frms)
1371 i = chan->desc_submitcount;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301372
Andrea Merellob8349172018-11-20 16:31:51 +01001373 list_for_each_entry(segment, &desc->segments, node) {
1374 if (chan->ext_addr)
1375 vdma_desc_write_64(chan,
1376 XILINX_VDMA_REG_START_ADDRESS_64(i++),
1377 segment->hw.buf_addr,
1378 segment->hw.buf_addr_msb);
1379 else
1380 vdma_desc_write(chan,
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301381 XILINX_VDMA_REG_START_ADDRESS(i++),
1382 segment->hw.buf_addr);
Kedareswara rao Appanab72db402016-04-06 10:38:08 +05301383
Andrea Merellob8349172018-11-20 16:31:51 +01001384 last = segment;
Kedareswara rao Appanaa65cf5122016-04-06 10:38:09 +05301385 }
Kedareswara rao Appana21e02a32017-12-07 10:51:02 +05301386
Andrea Merellob8349172018-11-20 16:31:51 +01001387 if (!last)
1388 return;
1389
1390 	/* HW expects these parameters to be the same for one transaction */
1391 vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
1392 vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
1393 last->hw.stride);
1394 vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
1395
1396 chan->desc_submitcount++;
1397 chan->desc_pendingcount--;
1398 list_del(&desc->node);
1399 list_add_tail(&desc->node, &chan->active_list);
1400 if (chan->desc_submitcount == chan->num_frms)
1401 chan->desc_submitcount = 0;
1402
Kedareswara rao Appana21e02a32017-12-07 10:51:02 +05301403 chan->idle = false;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301404}
1405
1406/**
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05301407 * xilinx_cdma_start_transfer - Starts cdma transfer
1408 * @chan: Driver specific channel struct pointer
1409 */
1410static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
1411{
1412 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1413 struct xilinx_cdma_tx_segment *tail_segment;
1414 u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR);
1415
1416 if (chan->err)
1417 return;
1418
Kedareswara rao Appana21e02a32017-12-07 10:51:02 +05301419 if (!chan->idle)
1420 return;
1421
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05301422 if (list_empty(&chan->pending_list))
1423 return;
1424
1425 head_desc = list_first_entry(&chan->pending_list,
1426 struct xilinx_dma_tx_descriptor, node);
1427 tail_desc = list_last_entry(&chan->pending_list,
1428 struct xilinx_dma_tx_descriptor, node);
1429 tail_segment = list_last_entry(&tail_desc->segments,
1430 struct xilinx_cdma_tx_segment, node);
1431
1432 if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
1433 ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX;
1434 ctrl_reg |= chan->desc_pendingcount <<
1435 XILINX_DMA_CR_COALESCE_SHIFT;
1436 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg);
1437 }
1438
1439 if (chan->has_sg) {
Kedareswara rao Appana48c62fb2018-01-03 12:12:09 +05301440 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
1441 XILINX_CDMA_CR_SGMODE);
1442
1443 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1444 XILINX_CDMA_CR_SGMODE);
1445
Kedareswara rao Appana9791e712016-06-07 19:21:16 +05301446 xilinx_write(chan, XILINX_DMA_REG_CURDESC,
1447 head_desc->async_tx.phys);
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05301448
1449 /* Update tail ptr register which will start the transfer */
Kedareswara rao Appana9791e712016-06-07 19:21:16 +05301450 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1451 tail_segment->phys);
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05301452 } else {
1453 /* In simple mode */
1454 struct xilinx_cdma_tx_segment *segment;
1455 struct xilinx_cdma_desc_hw *hw;
1456
1457 segment = list_first_entry(&head_desc->segments,
1458 struct xilinx_cdma_tx_segment,
1459 node);
1460
1461 hw = &segment->hw;
1462
Radhey Shyam Pandey0e03aca2018-09-29 11:18:00 -06001463 xilinx_write(chan, XILINX_CDMA_REG_SRCADDR,
1464 xilinx_prep_dma_addr_t(hw->src_addr));
1465 xilinx_write(chan, XILINX_CDMA_REG_DSTADDR,
1466 xilinx_prep_dma_addr_t(hw->dest_addr));
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05301467
1468 /* Start the transfer */
1469 dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
Andrea Merello616f0f82018-11-20 16:31:45 +01001470 hw->control & chan->xdev->max_buffer_len);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301471 }
1472
1473 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1474 chan->desc_pendingcount = 0;
Kedareswara rao Appana21e02a32017-12-07 10:51:02 +05301475 chan->idle = false;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301476}
1477
1478/**
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301479 * xilinx_dma_start_transfer - Starts DMA transfer
1480 * @chan: Driver specific channel struct pointer
1481 */
1482static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
1483{
1484 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
Kedareswara rao Appana23059402017-12-07 10:51:04 +05301485 struct xilinx_axidma_tx_segment *tail_segment;
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301486 u32 reg;
1487
1488 if (chan->err)
1489 return;
1490
1491 if (list_empty(&chan->pending_list))
1492 return;
1493
Kedareswara rao Appana21e02a32017-12-07 10:51:02 +05301494 if (!chan->idle)
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301495 return;
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301496
1497 head_desc = list_first_entry(&chan->pending_list,
1498 struct xilinx_dma_tx_descriptor, node);
1499 tail_desc = list_last_entry(&chan->pending_list,
1500 struct xilinx_dma_tx_descriptor, node);
1501 tail_segment = list_last_entry(&tail_desc->segments,
1502 struct xilinx_axidma_tx_segment, node);
1503
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301504 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1505
1506 if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
1507 reg &= ~XILINX_DMA_CR_COALESCE_MAX;
1508 reg |= chan->desc_pendingcount <<
1509 XILINX_DMA_CR_COALESCE_SHIFT;
1510 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1511 }
1512
Radhey Shyam Pandeybcb2dc72019-10-22 22:30:20 +05301513 if (chan->has_sg)
Kedareswara rao Appanaf0cba682016-06-07 19:21:15 +05301514 xilinx_write(chan, XILINX_DMA_REG_CURDESC,
1515 head_desc->async_tx.phys);
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301516
1517 xilinx_dma_start(chan);
1518
1519 if (chan->err)
1520 return;
1521
1522 /* Start the transfer */
Radhey Shyam Pandeybcb2dc72019-10-22 22:30:20 +05301523 if (chan->has_sg) {
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05301524 if (chan->cyclic)
Kedareswara rao Appanaf0cba682016-06-07 19:21:15 +05301525 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1526 chan->cyclic_seg_v->phys);
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05301527 else
Kedareswara rao Appanaf0cba682016-06-07 19:21:15 +05301528 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1529 tail_segment->phys);
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301530 } else {
1531 struct xilinx_axidma_tx_segment *segment;
1532 struct xilinx_axidma_desc_hw *hw;
1533
1534 segment = list_first_entry(&head_desc->segments,
1535 struct xilinx_axidma_tx_segment,
1536 node);
1537 hw = &segment->hw;
1538
Kedareswara rao Appanaf0cba682016-06-07 19:21:15 +05301539 xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr);
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301540
1541 /* Start the transfer */
1542 dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
Andrea Merello616f0f82018-11-20 16:31:45 +01001543 hw->control & chan->xdev->max_buffer_len);
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301544 }
1545
1546 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1547 chan->desc_pendingcount = 0;
Kedareswara rao Appana21e02a32017-12-07 10:51:02 +05301548 chan->idle = false;
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301549}
1550
1551/**
Radhey Shyam Pandey6ccd6922019-10-22 22:30:22 +05301552 * xilinx_mcdma_start_transfer - Starts MCDMA transfer
1553 * @chan: Driver specific channel struct pointer
1554 */
1555static void xilinx_mcdma_start_transfer(struct xilinx_dma_chan *chan)
1556{
1557 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1558 struct xilinx_axidma_tx_segment *tail_segment;
1559 u32 reg;
1560
1561 /*
1562	 * The lock has been taken by the calling function, so we don't
1563	 * need to take it here again.
1564 */
1565
1566 if (chan->err)
1567 return;
1568
1569 if (!chan->idle)
1570 return;
1571
1572 if (list_empty(&chan->pending_list))
1573 return;
1574
1575 head_desc = list_first_entry(&chan->pending_list,
1576 struct xilinx_dma_tx_descriptor, node);
1577 tail_desc = list_last_entry(&chan->pending_list,
1578 struct xilinx_dma_tx_descriptor, node);
1579 tail_segment = list_last_entry(&tail_desc->segments,
1580 struct xilinx_axidma_tx_segment, node);
1581
1582 reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest));
1583
1584 if (chan->desc_pendingcount <= XILINX_MCDMA_COALESCE_MAX) {
1585 reg &= ~XILINX_MCDMA_COALESCE_MASK;
1586 reg |= chan->desc_pendingcount <<
1587 XILINX_MCDMA_COALESCE_SHIFT;
1588 }
1589
1590 reg |= XILINX_MCDMA_IRQ_ALL_MASK;
1591 dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg);
1592
1593 /* Program current descriptor */
1594 xilinx_write(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET(chan->tdest),
1595 head_desc->async_tx.phys);
1596
1597 /* Program channel enable register */
1598 reg = dma_ctrl_read(chan, XILINX_MCDMA_CHEN_OFFSET);
1599 reg |= BIT(chan->tdest);
1600 dma_ctrl_write(chan, XILINX_MCDMA_CHEN_OFFSET, reg);
1601
1602 /* Start the fetch of BDs for the channel */
1603 reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest));
1604 reg |= XILINX_MCDMA_CR_RUNSTOP_MASK;
1605 dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg);
1606
1607 xilinx_dma_start(chan);
1608
1609 if (chan->err)
1610 return;
1611
1612 /* Start the transfer */
1613 xilinx_write(chan, XILINX_MCDMA_CHAN_TDESC_OFFSET(chan->tdest),
1614 tail_segment->phys);
1615
1616 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1617 chan->desc_pendingcount = 0;
1618 chan->idle = false;
1619}
1620
1621/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301622 * xilinx_dma_issue_pending - Issue pending transactions
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301623 * @dchan: DMA channel
1624 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301625static void xilinx_dma_issue_pending(struct dma_chan *dchan)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301626{
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301627 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
Kedareswara rao Appana26c5e362016-02-26 19:33:52 +05301628 unsigned long flags;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301629
Kedareswara rao Appana26c5e362016-02-26 19:33:52 +05301630 spin_lock_irqsave(&chan->lock, flags);
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301631 chan->start_transfer(chan);
Kedareswara rao Appana26c5e362016-02-26 19:33:52 +05301632 spin_unlock_irqrestore(&chan->lock, flags);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301633}
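/*
 * Hedged usage sketch (illustration only): after preparing a descriptor
 * with one of the prep callbacks in this file, a client submits it and
 * calls dma_async_issue_pending(), which is what ends up invoking
 * chan->start_transfer() above.  "example_kick_off" is a hypothetical name.
 */
static int example_kick_off(struct dma_chan *chan,
			    struct dma_async_tx_descriptor *desc)
{
	dma_cookie_t cookie;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return -EINVAL;

	dma_async_issue_pending(chan);
	return 0;
}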
1634
1635/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301636 * xilinx_dma_complete_descriptor - Mark the active descriptor as complete
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301637 * @chan: xilinx DMA channel
1638 *
1639 * CONTEXT: hardirq
1640 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301641static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301642{
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301643 struct xilinx_dma_tx_descriptor *desc, *next;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301644
Kedareswara rao Appana26c5e362016-02-26 19:33:52 +05301645 /* This function was invoked with lock held */
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05301646 if (list_empty(&chan->active_list))
Kedareswara rao Appana26c5e362016-02-26 19:33:52 +05301647 return;
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05301648
1649 list_for_each_entry_safe(desc, next, &chan->active_list, node) {
Nicholas Graumannd8bae212019-10-15 20:18:22 +05301650 if (chan->has_sg && chan->xdev->dma_config->dmatype !=
1651 XDMA_TYPE_VDMA)
1652 desc->residue = xilinx_dma_get_residue(chan, desc);
1653 else
1654 desc->residue = 0;
1655 desc->err = chan->err;
1656
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05301657 list_del(&desc->node);
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05301658 if (!desc->cyclic)
1659 dma_cookie_complete(&desc->async_tx);
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05301660 list_add_tail(&desc->node, &chan->done_list);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301661 }
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301662}
1663
1664/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301665 * xilinx_dma_reset - Reset DMA channel
1666 * @chan: Driver specific DMA channel
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301667 *
1668 * Return: '0' on success and failure value on error
1669 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301670static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301671{
Kedareswara rao Appana69490632016-03-03 23:02:42 +05301672 int err;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301673 u32 tmp;
1674
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301675 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301676
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301677 /* Wait for the hardware to finish reset */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301678 err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp,
1679 !(tmp & XILINX_DMA_DMACR_RESET), 0,
1680 XILINX_DMA_LOOP_COUNT);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301681
Kedareswara rao Appana9495f262016-02-26 19:33:54 +05301682 if (err) {
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301683 dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301684 dma_ctrl_read(chan, XILINX_DMA_REG_DMACR),
1685 dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301686 return -ETIMEDOUT;
1687 }
1688
1689 chan->err = false;
Kedareswara rao Appana21e02a32017-12-07 10:51:02 +05301690 chan->idle = true;
Nicholas Graumann8a631a52019-10-15 20:18:24 +05301691 chan->desc_pendingcount = 0;
Kedareswara rao Appanafe0503e2017-12-07 10:51:03 +05301692 chan->desc_submitcount = 0;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301693
Kedareswara rao Appana9495f262016-02-26 19:33:54 +05301694 return err;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301695}
1696
1697/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301698 * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts
1699 * @chan: Driver specific DMA channel
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301700 *
1701 * Return: '0' on success and failure value on error
1702 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301703static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301704{
1705 int err;
1706
1707 /* Reset VDMA */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301708 err = xilinx_dma_reset(chan);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301709 if (err)
1710 return err;
1711
1712 /* Enable interrupts */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301713 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1714 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301715
1716 return 0;
1717}
1718
1719/**
Radhey Shyam Pandey6ccd6922019-10-22 22:30:22 +05301720 * xilinx_mcdma_irq_handler - MCDMA Interrupt handler
1721 * @irq: IRQ number
1722 * @data: Pointer to the Xilinx MCDMA channel structure
1723 *
1724 * Return: IRQ_HANDLED/IRQ_NONE
1725 */
1726static irqreturn_t xilinx_mcdma_irq_handler(int irq, void *data)
1727{
1728 struct xilinx_dma_chan *chan = data;
1729 u32 status, ser_offset, chan_sermask, chan_offset = 0, chan_id;
1730
1731 if (chan->direction == DMA_DEV_TO_MEM)
1732 ser_offset = XILINX_MCDMA_RXINT_SER_OFFSET;
1733 else
1734 ser_offset = XILINX_MCDMA_TXINT_SER_OFFSET;
1735
1736 	/* Read the channel id raising the interrupt */
1737 chan_sermask = dma_ctrl_read(chan, ser_offset);
1738 chan_id = ffs(chan_sermask);
1739
1740 if (!chan_id)
1741 return IRQ_NONE;
1742
1743 if (chan->direction == DMA_DEV_TO_MEM)
1744 chan_offset = chan->xdev->s2mm_index;
1745
1746 chan_offset = chan_offset + (chan_id - 1);
1747 chan = chan->xdev->chan[chan_offset];
1748 /* Read the status and ack the interrupts. */
1749 status = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_SR_OFFSET(chan->tdest));
1750 if (!(status & XILINX_MCDMA_IRQ_ALL_MASK))
1751 return IRQ_NONE;
1752
1753 dma_ctrl_write(chan, XILINX_MCDMA_CHAN_SR_OFFSET(chan->tdest),
1754 status & XILINX_MCDMA_IRQ_ALL_MASK);
1755
1756 if (status & XILINX_MCDMA_IRQ_ERR_MASK) {
1757 dev_err(chan->dev, "Channel %p has errors %x cdr %x tdr %x\n",
1758 chan,
1759 dma_ctrl_read(chan, XILINX_MCDMA_CH_ERR_OFFSET),
1760 dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET
1761 (chan->tdest)),
1762 dma_ctrl_read(chan, XILINX_MCDMA_CHAN_TDESC_OFFSET
1763 (chan->tdest)));
1764 chan->err = true;
1765 }
1766
1767 if (status & XILINX_MCDMA_IRQ_DELAY_MASK) {
1768 /*
1769 		 * The device is taking too long to complete the transfer when
1770 		 * the user requires responsiveness.
1771 */
1772 dev_dbg(chan->dev, "Inter-packet latency too long\n");
1773 }
1774
1775 if (status & XILINX_MCDMA_IRQ_IOC_MASK) {
1776 spin_lock(&chan->lock);
1777 xilinx_dma_complete_descriptor(chan);
1778 chan->idle = true;
1779 chan->start_transfer(chan);
1780 spin_unlock(&chan->lock);
1781 }
1782
1783 tasklet_schedule(&chan->tasklet);
1784 return IRQ_HANDLED;
1785}
1786
1787/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301788 * xilinx_dma_irq_handler - DMA Interrupt handler
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301789 * @irq: IRQ number
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301790 * @data: Pointer to the Xilinx DMA channel structure
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301791 *
1792 * Return: IRQ_HANDLED/IRQ_NONE
1793 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301794static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301795{
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301796 struct xilinx_dma_chan *chan = data;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301797 u32 status;
1798
1799 /* Read the status and ack the interrupts. */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301800 status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR);
1801 if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK))
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301802 return IRQ_NONE;
1803
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301804 dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
1805 status & XILINX_DMA_DMAXR_ALL_IRQ_MASK);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301806
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301807 if (status & XILINX_DMA_DMASR_ERR_IRQ) {
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301808 /*
1809 * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
1810 * error is recoverable, ignore it. Otherwise flag the error.
1811 *
1812 * Only recoverable errors can be cleared in the DMASR register,
1813 		 * so take care not to set other error bits to 1.
1814 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301815 u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK;
Kedareswara rao Appana48a59ed2016-04-06 10:44:55 +05301816
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301817 dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
1818 errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301819
1820 if (!chan->flush_on_fsync ||
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301821 (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) {
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301822 dev_err(chan->dev,
1823 "Channel %p has errors %x, cdr %x tdr %x\n",
1824 chan, errors,
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301825 dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC),
1826 dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC));
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301827 chan->err = true;
1828 }
1829 }
1830
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301831 if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301832 /*
1833 		 * The device is taking too long to complete the transfer when
1834 		 * the user requires responsiveness.
1835 */
1836 dev_dbg(chan->dev, "Inter-packet latency too long\n");
1837 }
1838
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301839 if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
Kedareswara rao Appana26c5e362016-02-26 19:33:52 +05301840 spin_lock(&chan->lock);
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301841 xilinx_dma_complete_descriptor(chan);
Kedareswara rao Appana21e02a32017-12-07 10:51:02 +05301842 chan->idle = true;
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301843 chan->start_transfer(chan);
Kedareswara rao Appana26c5e362016-02-26 19:33:52 +05301844 spin_unlock(&chan->lock);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301845 }
1846
1847 tasklet_schedule(&chan->tasklet);
1848 return IRQ_HANDLED;
1849}
1850
1851/**
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05301852 * append_desc_queue - Queuing descriptor
1853 * @chan: Driver specific dma channel
1854 * @desc: dma transaction descriptor
1855 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301856static void append_desc_queue(struct xilinx_dma_chan *chan,
1857 struct xilinx_dma_tx_descriptor *desc)
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05301858{
1859 struct xilinx_vdma_tx_segment *tail_segment;
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301860 struct xilinx_dma_tx_descriptor *tail_desc;
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301861 struct xilinx_axidma_tx_segment *axidma_tail_segment;
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05301862 struct xilinx_cdma_tx_segment *cdma_tail_segment;
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05301863
1864 if (list_empty(&chan->pending_list))
1865 goto append;
1866
1867 /*
1868 * Add the hardware descriptor to the chain of hardware descriptors
1869 * that already exists in memory.
1870 */
1871 tail_desc = list_last_entry(&chan->pending_list,
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301872 struct xilinx_dma_tx_descriptor, node);
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05301873 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301874 tail_segment = list_last_entry(&tail_desc->segments,
1875 struct xilinx_vdma_tx_segment,
1876 node);
1877 tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05301878 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05301879 cdma_tail_segment = list_last_entry(&tail_desc->segments,
1880 struct xilinx_cdma_tx_segment,
1881 node);
1882 cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301883 } else {
1884 axidma_tail_segment = list_last_entry(&tail_desc->segments,
1885 struct xilinx_axidma_tx_segment,
1886 node);
1887 axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1888 }
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05301889
1890 /*
1891 * Add the software descriptor and all children to the list
1892 * of pending transactions
1893 */
1894append:
1895 list_add_tail(&desc->node, &chan->pending_list);
1896 chan->desc_pendingcount++;
1897
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05301898 if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA)
1899 && unlikely(chan->desc_pendingcount > chan->num_frms)) {
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05301900 dev_dbg(chan->dev, "desc pendingcount is too high\n");
1901 chan->desc_pendingcount = chan->num_frms;
1902 }
1903}
1904
1905/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301906 * xilinx_dma_tx_submit - Submit DMA transaction
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301907 * @tx: Async transaction descriptor
1908 *
1909 * Return: cookie value on success and failure value on error
1910 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301911static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301912{
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301913 struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
1914 struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301915 dma_cookie_t cookie;
1916 unsigned long flags;
1917 int err;
1918
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05301919 if (chan->cyclic) {
1920 xilinx_dma_free_tx_descriptor(chan, desc);
1921 return -EBUSY;
1922 }
1923
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301924 if (chan->err) {
1925 /*
1926 * If reset fails, need to hard reset the system.
1927 * Channel is no longer functional
1928 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301929 err = xilinx_dma_chan_reset(chan);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301930 if (err < 0)
1931 return err;
1932 }
1933
1934 spin_lock_irqsave(&chan->lock, flags);
1935
1936 cookie = dma_cookie_assign(tx);
1937
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05301938 /* Put this transaction onto the tail of the pending queue */
1939 append_desc_queue(chan, desc);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301940
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05301941 if (desc->cyclic)
1942 chan->cyclic = true;
1943
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301944 spin_unlock_irqrestore(&chan->lock, flags);
1945
1946 return cookie;
1947}
1948
1949/**
1950 * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a
1951 * DMA_SLAVE transaction
1952 * @dchan: DMA channel
1953 * @xt: Interleaved template pointer
1954 * @flags: transfer ack flags
1955 *
1956 * Return: Async transaction descriptor on success and NULL on failure
1957 */
1958static struct dma_async_tx_descriptor *
1959xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
1960 struct dma_interleaved_template *xt,
1961 unsigned long flags)
1962{
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301963 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1964 struct xilinx_dma_tx_descriptor *desc;
Kedareswara rao Appana4b597c62018-01-03 12:12:10 +05301965 struct xilinx_vdma_tx_segment *segment;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301966 struct xilinx_vdma_desc_hw *hw;
1967
1968 if (!is_slave_direction(xt->dir))
1969 return NULL;
1970
1971 if (!xt->numf || !xt->sgl[0].size)
1972 return NULL;
1973
Srikanth Thokalaa5e48e22014-11-05 20:37:01 +02001974 if (xt->frame_size != 1)
1975 return NULL;
1976
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301977 /* Allocate a transaction descriptor. */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301978 desc = xilinx_dma_alloc_tx_descriptor(chan);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301979 if (!desc)
1980 return NULL;
1981
1982 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301983 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301984 async_tx_ack(&desc->async_tx);
1985
1986 /* Allocate the link descriptor from DMA pool */
1987 segment = xilinx_vdma_alloc_tx_segment(chan);
1988 if (!segment)
1989 goto error;
1990
1991 /* Fill in the hardware descriptor */
1992 hw = &segment->hw;
1993 hw->vsize = xt->numf;
1994 hw->hsize = xt->sgl[0].size;
Srikanth Thokala6d80f452014-11-05 20:37:02 +02001995 hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301996 XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301997 hw->stride |= chan->config.frm_dly <<
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301998 XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301999
Kedareswara rao Appanab72db402016-04-06 10:38:08 +05302000 if (xt->dir != DMA_MEM_TO_DEV) {
2001 if (chan->ext_addr) {
2002 hw->buf_addr = lower_32_bits(xt->dst_start);
2003 hw->buf_addr_msb = upper_32_bits(xt->dst_start);
2004 } else {
2005 hw->buf_addr = xt->dst_start;
2006 }
2007 } else {
2008 if (chan->ext_addr) {
2009 hw->buf_addr = lower_32_bits(xt->src_start);
2010 hw->buf_addr_msb = upper_32_bits(xt->src_start);
2011 } else {
2012 hw->buf_addr = xt->src_start;
2013 }
2014 }
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302015
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302016 /* Insert the segment into the descriptor segments list. */
2017 list_add_tail(&segment->node, &desc->segments);
2018
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302019 /* Link the last hardware descriptor with the first. */
2020 segment = list_first_entry(&desc->segments,
2021 struct xilinx_vdma_tx_segment, node);
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05302022 desc->async_tx.phys = segment->phys;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302023
2024 return &desc->async_tx;
2025
2026error:
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302027 xilinx_dma_free_tx_descriptor(chan, desc);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302028 return NULL;
2029}
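/*
 * Hedged client sketch (illustration only): a video client would describe
 * one frame with a dma_interleaved_template carrying a single data_chunk
 * and hand it to dmaengine_prep_interleaved_dma(), which lands in the
 * callback above.  The 1280x720, 32bpp geometry and the function name are
 * hypothetical; error handling is trimmed.
 */
static struct dma_async_tx_descriptor *
example_prep_vdma_frame(struct dma_chan *chan, dma_addr_t frame_buf)
{
	struct dma_async_tx_descriptor *desc;
	struct dma_interleaved_template *xt;

	xt = kzalloc(struct_size(xt, sgl, 1), GFP_KERNEL);
	if (!xt)
		return NULL;

	xt->dir = DMA_DEV_TO_MEM;	/* S2MM: capture into memory */
	xt->dst_start = frame_buf;
	xt->numf = 720;			/* lines per frame (vsize) */
	xt->frame_size = 1;		/* the callback requires exactly one chunk */
	xt->sgl[0].size = 1280 * 4;	/* bytes per line (hsize) */
	xt->sgl[0].icg = 0;		/* no inter-line gap: stride == hsize */

	desc = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
	kfree(xt);
	return desc;
}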
2030
2031/**
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05302032 * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
2033 * @dchan: DMA channel
2034 * @dma_dst: destination address
2035 * @dma_src: source address
2036 * @len: transfer length
2037 * @flags: transfer ack flags
2038 *
2039 * Return: Async transaction descriptor on success and NULL on failure
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302040 */
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05302041static struct dma_async_tx_descriptor *
2042xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
2043 dma_addr_t dma_src, size_t len, unsigned long flags)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302044{
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05302045 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2046 struct xilinx_dma_tx_descriptor *desc;
Akinobu Mitadb6a3d02017-03-14 00:59:12 +09002047 struct xilinx_cdma_tx_segment *segment;
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05302048 struct xilinx_cdma_desc_hw *hw;
2049
Andrea Merello616f0f82018-11-20 16:31:45 +01002050 if (!len || len > chan->xdev->max_buffer_len)
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05302051 return NULL;
2052
2053 desc = xilinx_dma_alloc_tx_descriptor(chan);
2054 if (!desc)
2055 return NULL;
2056
2057 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2058 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2059
2060 /* Allocate the link descriptor from DMA pool */
2061 segment = xilinx_cdma_alloc_tx_segment(chan);
2062 if (!segment)
2063 goto error;
2064
2065 hw = &segment->hw;
2066 hw->control = len;
2067 hw->src_addr = dma_src;
2068 hw->dest_addr = dma_dst;
Kedareswara rao Appana9791e712016-06-07 19:21:16 +05302069 if (chan->ext_addr) {
2070 hw->src_addr_msb = upper_32_bits(dma_src);
2071 hw->dest_addr_msb = upper_32_bits(dma_dst);
2072 }
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05302073
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05302074 /* Insert the segment into the descriptor segments list. */
2075 list_add_tail(&segment->node, &desc->segments);
2076
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05302077 desc->async_tx.phys = segment->phys;
Akinobu Mitadb6a3d02017-03-14 00:59:12 +09002078 hw->next_desc = segment->phys;
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05302079
2080 return &desc->async_tx;
2081
2082error:
2083 xilinx_dma_free_tx_descriptor(chan, desc);
2084 return NULL;
2085}
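/*
 * Hedged client sketch (illustration only): a CDMA consumer reaches the
 * memcpy prep callback above through the generic dmaengine helper.  The
 * function name is hypothetical and completion handling is trimmed.
 */
static dma_cookie_t example_cdma_copy(struct dma_chan *chan, dma_addr_t dst,
				      dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_dma_memcpy(chan, dst, src, len,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -ENOMEM;

	return dmaengine_submit(desc);
}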
2086
2087/**
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302088 * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
2089 * @dchan: DMA channel
2090 * @sgl: scatterlist to transfer to/from
2091  * @sg_len: number of entries in @sgl
2092 * @direction: DMA direction
2093 * @flags: transfer ack flags
2094 * @context: APP words of the descriptor
2095 *
2096 * Return: Async transaction descriptor on success and NULL on failure
2097 */
2098static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
2099 struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
2100 enum dma_transfer_direction direction, unsigned long flags,
2101 void *context)
2102{
2103 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2104 struct xilinx_dma_tx_descriptor *desc;
Kedareswara rao Appana23059402017-12-07 10:51:04 +05302105 struct xilinx_axidma_tx_segment *segment = NULL;
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302106 u32 *app_w = (u32 *)context;
2107 struct scatterlist *sg;
2108 size_t copy;
2109 size_t sg_used;
2110 unsigned int i;
2111
2112 if (!is_slave_direction(direction))
2113 return NULL;
2114
2115 /* Allocate a transaction descriptor. */
2116 desc = xilinx_dma_alloc_tx_descriptor(chan);
2117 if (!desc)
2118 return NULL;
2119
2120 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2121 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2122
2123 /* Build transactions using information in the scatter gather list */
2124 for_each_sg(sgl, sg, sg_len, i) {
2125 sg_used = 0;
2126
2127 /* Loop until the entire scatterlist entry is used */
2128 while (sg_used < sg_dma_len(sg)) {
2129 struct xilinx_axidma_desc_hw *hw;
2130
2131 /* Get a free segment */
2132 segment = xilinx_axidma_alloc_tx_segment(chan);
2133 if (!segment)
2134 goto error;
2135
2136 /*
2137 * Calculate the maximum number of bytes to transfer,
2138 * making sure it is less than the hw limit
2139 */
Andrea Merello616f0f82018-11-20 16:31:45 +01002140 copy = xilinx_dma_calc_copysize(chan, sg_dma_len(sg),
2141 sg_used);
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302142 hw = &segment->hw;
2143
2144 /* Fill in the descriptor */
Kedareswara rao Appanaf0cba682016-06-07 19:21:15 +05302145 xilinx_axidma_buf(chan, hw, sg_dma_address(sg),
2146 sg_used, 0);
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302147
2148 hw->control = copy;
2149
2150 if (chan->direction == DMA_MEM_TO_DEV) {
2151 if (app_w)
2152 memcpy(hw->app, app_w, sizeof(u32) *
2153 XILINX_DMA_NUM_APP_WORDS);
2154 }
2155
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302156 sg_used += copy;
2157
2158 /*
2159 * Insert the segment into the descriptor segments
2160 * list.
2161 */
2162 list_add_tail(&segment->node, &desc->segments);
2163 }
2164 }
2165
2166 segment = list_first_entry(&desc->segments,
2167 struct xilinx_axidma_tx_segment, node);
2168 desc->async_tx.phys = segment->phys;
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302169
2170 /* For the last DMA_MEM_TO_DEV transfer, set EOP */
2171 if (chan->direction == DMA_MEM_TO_DEV) {
2172 segment->hw.control |= XILINX_DMA_BD_SOP;
2173 segment = list_last_entry(&desc->segments,
2174 struct xilinx_axidma_tx_segment,
2175 node);
2176 segment->hw.control |= XILINX_DMA_BD_EOP;
2177 }
2178
2179 return &desc->async_tx;
2180
2181error:
2182 xilinx_dma_free_tx_descriptor(chan, desc);
2183 return NULL;
2184}
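/*
 * Hedged client sketch (illustration only): a typical AXI DMA consumer maps
 * a buffer, wraps it in a one-entry scatterlist and calls
 * dmaengine_prep_slave_sg(), which ends up in the callback above.  The
 * function name is hypothetical; unmapping on completion is omitted.
 */
static int example_send_buf(struct dma_chan *chan, void *buf, size_t len)
{
	struct dma_async_tx_descriptor *desc;
	struct scatterlist sg;
	int nents;

	sg_init_one(&sg, buf, len);
	nents = dma_map_sg(chan->device->dev, &sg, 1, DMA_TO_DEVICE);
	if (!nents)
		return -EIO;

	desc = dmaengine_prep_slave_sg(chan, &sg, nents, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -ENOMEM;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}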
2185
2186/**
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05302187 * xilinx_dma_prep_dma_cyclic - prepare descriptors for a cyclic DMA transaction
Kedareswara rao Appanae50a0ad2017-12-07 10:51:05 +05302188 * @dchan: DMA channel
2189 * @buf_addr: Physical address of the buffer
2190 * @buf_len: Total length of the cyclic buffers
2191 * @period_len: length of individual cyclic buffer
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05302192 * @direction: DMA direction
2193 * @flags: transfer ack flags
Kedareswara rao Appanae50a0ad2017-12-07 10:51:05 +05302194 *
2195 * Return: Async transaction descriptor on success and NULL on failure
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05302196 */
2197static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
2198 struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
2199 size_t period_len, enum dma_transfer_direction direction,
2200 unsigned long flags)
2201{
2202 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2203 struct xilinx_dma_tx_descriptor *desc;
2204 struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
2205 size_t copy, sg_used;
2206 unsigned int num_periods;
2207 int i;
2208 u32 reg;
2209
Arnd Bergmannf67c3bd2016-06-13 17:07:33 +02002210 if (!period_len)
2211 return NULL;
2212
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05302213 num_periods = buf_len / period_len;
2214
Arnd Bergmannf67c3bd2016-06-13 17:07:33 +02002215 if (!num_periods)
2216 return NULL;
2217
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05302218 if (!is_slave_direction(direction))
2219 return NULL;
2220
2221 /* Allocate a transaction descriptor. */
2222 desc = xilinx_dma_alloc_tx_descriptor(chan);
2223 if (!desc)
2224 return NULL;
2225
2226 chan->direction = direction;
2227 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2228 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2229
2230 for (i = 0; i < num_periods; ++i) {
2231 sg_used = 0;
2232
2233 while (sg_used < period_len) {
2234 struct xilinx_axidma_desc_hw *hw;
2235
2236 /* Get a free segment */
2237 segment = xilinx_axidma_alloc_tx_segment(chan);
2238 if (!segment)
2239 goto error;
2240
2241 /*
2242 * Calculate the maximum number of bytes to transfer,
2243 * making sure it is less than the hw limit
2244 */
Andrea Merello616f0f82018-11-20 16:31:45 +01002245 copy = xilinx_dma_calc_copysize(chan, period_len,
2246 sg_used);
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05302247 hw = &segment->hw;
Kedareswara rao Appanaf0cba682016-06-07 19:21:15 +05302248 xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
2249 period_len * i);
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05302250 hw->control = copy;
2251
2252 if (prev)
2253 prev->hw.next_desc = segment->phys;
2254
2255 prev = segment;
2256 sg_used += copy;
2257
2258 /*
2259 * Insert the segment into the descriptor segments
2260 * list.
2261 */
2262 list_add_tail(&segment->node, &desc->segments);
2263 }
2264 }
2265
2266 head_segment = list_first_entry(&desc->segments,
2267 struct xilinx_axidma_tx_segment, node);
2268 desc->async_tx.phys = head_segment->phys;
2269
2270 desc->cyclic = true;
2271 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2272 reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
2273 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
2274
Kedareswara rao Appanae598e6e2016-07-09 14:09:48 +05302275 segment = list_last_entry(&desc->segments,
2276 struct xilinx_axidma_tx_segment,
2277 node);
2278 segment->hw.next_desc = (u32) head_segment->phys;
2279
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05302280 /* For the last DMA_MEM_TO_DEV transfer, set EOP */
2281 if (direction == DMA_MEM_TO_DEV) {
Kedareswara rao Appanae167a0b2016-06-09 11:32:12 +05302282 head_segment->hw.control |= XILINX_DMA_BD_SOP;
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05302283 segment->hw.control |= XILINX_DMA_BD_EOP;
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05302284 }
2285
2286 return &desc->async_tx;
2287
2288error:
2289 xilinx_dma_free_tx_descriptor(chan, desc);
2290 return NULL;
2291}
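/*
 * Hedged client sketch (illustration only): cyclic transfers are typically
 * requested by audio-style clients that want the engine to loop over a ring
 * buffer until terminated; the per-period callback fires each time one
 * period completes.  The function name is hypothetical.
 */
static int example_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
				size_t buf_len, size_t period_len,
				dma_async_tx_callback period_done, void *arg)
{
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	desc->callback = period_done;
	desc->callback_param = arg;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}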
2292
2293/**
Radhey Shyam Pandey6ccd6922019-10-22 22:30:22 +05302294 * xilinx_mcdma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
2295 * @dchan: DMA channel
2296 * @sgl: scatterlist to transfer to/from
2297  * @sg_len: number of entries in @sgl
2298 * @direction: DMA direction
2299 * @flags: transfer ack flags
2300 * @context: APP words of the descriptor
2301 *
2302 * Return: Async transaction descriptor on success and NULL on failure
2303 */
2304static struct dma_async_tx_descriptor *
2305xilinx_mcdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
2306 unsigned int sg_len,
2307 enum dma_transfer_direction direction,
2308 unsigned long flags, void *context)
2309{
2310 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2311 struct xilinx_dma_tx_descriptor *desc;
2312 struct xilinx_aximcdma_tx_segment *segment = NULL;
2313 u32 *app_w = (u32 *)context;
2314 struct scatterlist *sg;
2315 size_t copy;
2316 size_t sg_used;
2317 unsigned int i;
2318
2319 if (!is_slave_direction(direction))
2320 return NULL;
2321
2322 /* Allocate a transaction descriptor. */
2323 desc = xilinx_dma_alloc_tx_descriptor(chan);
2324 if (!desc)
2325 return NULL;
2326
2327 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2328 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2329
2330 /* Build transactions using information in the scatter gather list */
2331 for_each_sg(sgl, sg, sg_len, i) {
2332 sg_used = 0;
2333
2334 /* Loop until the entire scatterlist entry is used */
2335 while (sg_used < sg_dma_len(sg)) {
2336 struct xilinx_aximcdma_desc_hw *hw;
2337
2338 /* Get a free segment */
2339 segment = xilinx_aximcdma_alloc_tx_segment(chan);
2340 if (!segment)
2341 goto error;
2342
2343 /*
2344 * Calculate the maximum number of bytes to transfer,
2345 * making sure it is less than the hw limit
2346 */
2347 copy = min_t(size_t, sg_dma_len(sg) - sg_used,
2348 chan->xdev->max_buffer_len);
2349 hw = &segment->hw;
2350
2351 /* Fill in the descriptor */
2352 xilinx_aximcdma_buf(chan, hw, sg_dma_address(sg),
2353 sg_used);
2354 hw->control = copy;
2355
2356 if (chan->direction == DMA_MEM_TO_DEV && app_w) {
2357 memcpy(hw->app, app_w, sizeof(u32) *
2358 XILINX_DMA_NUM_APP_WORDS);
2359 }
2360
2361 sg_used += copy;
2362 /*
2363 * Insert the segment into the descriptor segments
2364 * list.
2365 */
2366 list_add_tail(&segment->node, &desc->segments);
2367 }
2368 }
2369
2370 segment = list_first_entry(&desc->segments,
2371 struct xilinx_aximcdma_tx_segment, node);
2372 desc->async_tx.phys = segment->phys;
2373
2374 /* For the last DMA_MEM_TO_DEV transfer, set EOP */
2375 if (chan->direction == DMA_MEM_TO_DEV) {
2376 segment->hw.control |= XILINX_MCDMA_BD_SOP;
2377 segment = list_last_entry(&desc->segments,
2378 struct xilinx_aximcdma_tx_segment,
2379 node);
2380 segment->hw.control |= XILINX_MCDMA_BD_EOP;
2381 }
2382
2383 return &desc->async_tx;
2384
2385error:
2386 xilinx_dma_free_tx_descriptor(chan, desc);
2387
2388 return NULL;
2389}
2390
2391/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302392 * xilinx_dma_terminate_all - Halt the channel and free descriptors
Kedareswara rao Appanae50a0ad2017-12-07 10:51:05 +05302393 * @dchan: Driver specific DMA Channel pointer
2394 *
2395 * Return: '0' always.
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302396 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302397static int xilinx_dma_terminate_all(struct dma_chan *dchan)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302398{
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302399 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05302400 u32 reg;
Akinobu Mita676f9c22017-03-14 00:59:11 +09002401 int err;
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05302402
2403 if (chan->cyclic)
2404 xilinx_dma_chan_reset(chan);
Maxime Ripardba714042014-11-17 14:42:38 +01002405
Akinobu Mita676f9c22017-03-14 00:59:11 +09002406 err = chan->stop_transfer(chan);
2407 if (err) {
2408 dev_err(chan->dev, "Cannot stop channel %p: %x\n",
2409 chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
2410 chan->err = true;
2411 }
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302412
2413 /* Remove and free all of the descriptors in the lists */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302414 xilinx_dma_free_descriptors(chan);
Kedareswara rao Appana21e02a32017-12-07 10:51:02 +05302415 chan->idle = true;
Maxime Ripardba714042014-11-17 14:42:38 +01002416
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05302417 if (chan->cyclic) {
2418 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2419 reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
2420 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
2421 chan->cyclic = false;
2422 }
2423
Kedareswara rao Appana48c62fb2018-01-03 12:12:09 +05302424 if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
2425 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
2426 XILINX_CDMA_CR_SGMODE);
2427
Maxime Ripardba714042014-11-17 14:42:38 +01002428 return 0;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302429}
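/*
 * Hedged client sketch (illustration only): consumers never call the
 * terminate callback above directly; they go through the dmaengine
 * wrappers.  dmaengine_terminate_sync() also waits for in-flight completion
 * callbacks to finish, which is usually what teardown paths want.
 * "example_stop_channel" is a hypothetical name.
 */
static void example_stop_channel(struct dma_chan *chan)
{
	dmaengine_terminate_sync(chan);
}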
2430
2431/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302432 * xilinx_vdma_channel_set_config - Configure VDMA channel
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302433 * Run-time configuration for Axi VDMA, supports:
2434 * . halt the channel
2435 * . configure interrupt coalescing and inter-packet delay threshold
2436 * . start/stop parking
2437 * . enable genlock
2438 *
2439 * @dchan: DMA channel
2440 * @cfg: VDMA device configuration pointer
2441 *
2442 * Return: '0' on success and failure value on error
2443 */
2444int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
2445 struct xilinx_vdma_config *cfg)
2446{
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302447 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302448 u32 dmacr;
2449
2450 if (cfg->reset)
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302451 return xilinx_dma_chan_reset(chan);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302452
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302453 dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302454
2455 chan->config.frm_dly = cfg->frm_dly;
2456 chan->config.park = cfg->park;
2457
2458 /* genlock settings */
2459 chan->config.gen_lock = cfg->gen_lock;
2460 chan->config.master = cfg->master;
2461
2462 if (cfg->gen_lock && chan->genlock) {
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302463 dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
2464 dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302465 }
2466
2467 chan->config.frm_cnt_en = cfg->frm_cnt_en;
Radhey Shyam Pandey0894aa22018-06-13 13:04:48 +05302468 chan->config.vflip_en = cfg->vflip_en;
2469
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302470 if (cfg->park)
2471 chan->config.park_frm = cfg->park_frm;
2472 else
2473 chan->config.park_frm = -1;
2474
2475 chan->config.coalesc = cfg->coalesc;
2476 chan->config.delay = cfg->delay;
2477
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302478 if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
2479 dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302480 chan->config.coalesc = cfg->coalesc;
2481 }
2482
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302483 if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
2484 dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302485 chan->config.delay = cfg->delay;
2486 }
2487
2488 /* FSync Source selection */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302489 dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
2490 dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302491
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302492 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302493
2494 return 0;
2495}
2496EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
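
/*
 * Example (editor's sketch, not part of the driver): a hypothetical DMA
 * client that already holds a channel from dma_request_chan() could apply
 * a run-time VDMA configuration along these lines. All field values below
 * are placeholders; coalesc = 1 requests an interrupt per frame, and
 * gen_lock = 1 only takes effect when the channel was synthesized with
 * genlock support.
 *
 *	struct xilinx_vdma_config cfg = {
 *		.frm_dly    = 0,
 *		.gen_lock   = 1,
 *		.master     = 0,
 *		.frm_cnt_en = 1,
 *		.coalesc    = 1,
 *		.delay      = 0,
 *		.park       = 0,
 *	};
 *	int ret = xilinx_vdma_channel_set_config(chan, &cfg);
 */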
2497
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302498/* -----------------------------------------------------------------------------
2499 * Probe and remove
2500 */
2501
2502/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302503 * xilinx_dma_chan_remove - Per Channel remove function
2504 * @chan: Driver specific DMA channel
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302505 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302506static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302507{
2508 /* Disable all interrupts */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302509 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
2510 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302511
2512 if (chan->irq > 0)
2513 free_irq(chan->irq, chan);
2514
2515 tasklet_kill(&chan->tasklet);
2516
2517 list_del(&chan->common.device_node);
2518}
2519
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302520static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2521 struct clk **tx_clk, struct clk **rx_clk,
2522 struct clk **sg_clk, struct clk **tmp_clk)
2523{
2524 int err;
2525
2526 *tmp_clk = NULL;
2527
2528 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2529 if (IS_ERR(*axi_clk)) {
2530 err = PTR_ERR(*axi_clk);
Radhey Shyam Pandey944879b2019-09-26 16:21:00 +05302531 if (err != -EPROBE_DEFER)
2532 dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n",
2533 err);
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302534 return err;
2535 }
2536
2537 *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
2538 if (IS_ERR(*tx_clk))
2539 *tx_clk = NULL;
2540
2541 *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
2542 if (IS_ERR(*rx_clk))
2543 *rx_clk = NULL;
2544
2545 *sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
2546 if (IS_ERR(*sg_clk))
2547 *sg_clk = NULL;
2548
2549 err = clk_prepare_enable(*axi_clk);
2550 if (err) {
Lars-Peter Clausen574897d2017-08-31 13:35:10 +02002551 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302552 return err;
2553 }
2554
2555 err = clk_prepare_enable(*tx_clk);
2556 if (err) {
Lars-Peter Clausen574897d2017-08-31 13:35:10 +02002557 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302558 goto err_disable_axiclk;
2559 }
2560
2561 err = clk_prepare_enable(*rx_clk);
2562 if (err) {
Lars-Peter Clausen574897d2017-08-31 13:35:10 +02002563 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302564 goto err_disable_txclk;
2565 }
2566
2567 err = clk_prepare_enable(*sg_clk);
2568 if (err) {
Lars-Peter Clausen574897d2017-08-31 13:35:10 +02002569 dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err);
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302570 goto err_disable_rxclk;
2571 }
2572
2573 return 0;
2574
2575err_disable_rxclk:
2576 clk_disable_unprepare(*rx_clk);
2577err_disable_txclk:
2578 clk_disable_unprepare(*tx_clk);
2579err_disable_axiclk:
2580 clk_disable_unprepare(*axi_clk);
2581
2582 return err;
2583}
2584
2585static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2586 struct clk **dev_clk, struct clk **tmp_clk,
2587 struct clk **tmp1_clk, struct clk **tmp2_clk)
2588{
2589 int err;
2590
2591 *tmp_clk = NULL;
2592 *tmp1_clk = NULL;
2593 *tmp2_clk = NULL;
2594
2595 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2596 if (IS_ERR(*axi_clk)) {
2597 err = PTR_ERR(*axi_clk);
Radhey Shyam Pandey944879b2019-09-26 16:21:00 +05302598 if (err != -EPROBE_DEFER)
2599 dev_err(&pdev->dev, "failed to get axi_clk (%d)\n",
2600 err);
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302601 return err;
2602 }
2603
2604 *dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
2605 if (IS_ERR(*dev_clk)) {
2606 err = PTR_ERR(*dev_clk);
Radhey Shyam Pandey944879b2019-09-26 16:21:00 +05302607 if (err != -EPROBE_DEFER)
2608 dev_err(&pdev->dev, "failed to get dev_clk (%d)\n",
2609 err);
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302610 return err;
2611 }
2612
2613 err = clk_prepare_enable(*axi_clk);
2614 if (err) {
Lars-Peter Clausen574897d2017-08-31 13:35:10 +02002615 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302616 return err;
2617 }
2618
2619 err = clk_prepare_enable(*dev_clk);
2620 if (err) {
Lars-Peter Clausen574897d2017-08-31 13:35:10 +02002621 dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err);
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302622 goto err_disable_axiclk;
2623 }
2624
2625 return 0;
2626
2627err_disable_axiclk:
2628 clk_disable_unprepare(*axi_clk);
2629
2630 return err;
2631}
2632
2633static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2634 struct clk **tx_clk, struct clk **txs_clk,
2635 struct clk **rx_clk, struct clk **rxs_clk)
2636{
2637 int err;
2638
2639 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2640 if (IS_ERR(*axi_clk)) {
2641 err = PTR_ERR(*axi_clk);
Radhey Shyam Pandey944879b2019-09-26 16:21:00 +05302642 if (err != -EPROBE_DEFER)
2643 dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n",
2644 err);
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302645 return err;
2646 }
2647
2648 *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
2649 if (IS_ERR(*tx_clk))
2650 *tx_clk = NULL;
2651
2652 *txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
2653 if (IS_ERR(*txs_clk))
2654 *txs_clk = NULL;
2655
2656 *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
2657 if (IS_ERR(*rx_clk))
2658 *rx_clk = NULL;
2659
2660 *rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
2661 if (IS_ERR(*rxs_clk))
2662 *rxs_clk = NULL;
2663
2664 err = clk_prepare_enable(*axi_clk);
2665 if (err) {
Radhey Shyam Pandey944879b2019-09-26 16:21:00 +05302666 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n",
2667 err);
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302668 return err;
2669 }
2670
2671 err = clk_prepare_enable(*tx_clk);
2672 if (err) {
Lars-Peter Clausen574897d2017-08-31 13:35:10 +02002673 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302674 goto err_disable_axiclk;
2675 }
2676
2677 err = clk_prepare_enable(*txs_clk);
2678 if (err) {
Lars-Peter Clausen574897d2017-08-31 13:35:10 +02002679 dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err);
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302680 goto err_disable_txclk;
2681 }
2682
2683 err = clk_prepare_enable(*rx_clk);
2684 if (err) {
Lars-Peter Clausen574897d2017-08-31 13:35:10 +02002685 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302686 goto err_disable_txsclk;
2687 }
2688
2689 err = clk_prepare_enable(*rxs_clk);
2690 if (err) {
Lars-Peter Clausen574897d2017-08-31 13:35:10 +02002691 dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err);
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302692 goto err_disable_rxclk;
2693 }
2694
2695 return 0;
2696
2697err_disable_rxclk:
2698 clk_disable_unprepare(*rx_clk);
2699err_disable_txsclk:
2700 clk_disable_unprepare(*txs_clk);
2701err_disable_txclk:
2702 clk_disable_unprepare(*tx_clk);
2703err_disable_axiclk:
2704 clk_disable_unprepare(*axi_clk);
2705
2706 return err;
2707}
2708
2709static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
2710{
2711 clk_disable_unprepare(xdev->rxs_clk);
2712 clk_disable_unprepare(xdev->rx_clk);
2713 clk_disable_unprepare(xdev->txs_clk);
2714 clk_disable_unprepare(xdev->tx_clk);
2715 clk_disable_unprepare(xdev->axi_clk);
2716}
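
/*
 * Example (editor's sketch): the *_clk_init() helpers above look clocks up
 * by the IP's interface names. A hypothetical AXI DMA node could therefore
 * carry properties along these lines (the phandles and the remainder of
 * the node are design specific and omitted):
 *
 *	axi_dma_0: dma@40400000 {
 *		compatible = "xlnx,axi-dma-1.00.a";
 *		clock-names = "s_axi_lite_aclk", "m_axi_mm2s_aclk",
 *			      "m_axi_s2mm_aclk", "m_axi_sg_aclk";
 *		clocks = <&clkc 15>, <&clkc 15>, <&clkc 15>, <&clkc 15>;
 *		...
 *	};
 *
 * Only "s_axi_lite_aclk" is mandatory here; the MM2S, S2MM and SG clocks
 * are treated as optional by axidma_clk_init().
 */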
2717
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302718/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302719 * xilinx_dma_chan_probe - Per Channel Probing
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302720 * It gets channel features from the device tree entry and
2721 * initializes special channel handling routines
2722 *
2723 * @xdev: Driver specific device structure
2724 * @node: Device node
Kedareswara rao Appanae50a0ad2017-12-07 10:51:05 +05302725 * @chan_id: DMA Channel id
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302726 *
2727 * Return: '0' on success and failure value on error
2728 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302729static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
Kedareswara rao Appana1a9e7a02016-06-24 10:51:23 +05302730 struct device_node *node, int chan_id)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302731{
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302732 struct xilinx_dma_chan *chan;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302733 bool has_dre = false;
2734 u32 value, width;
2735 int err;
2736
2737 /* Allocate and initialize the channel structure */
2738 chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
2739 if (!chan)
2740 return -ENOMEM;
2741
2742 chan->dev = xdev->dev;
2743 chan->xdev = xdev;
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05302744 chan->desc_pendingcount = 0x0;
Kedareswara rao Appanab72db402016-04-06 10:38:08 +05302745 chan->ext_addr = xdev->ext_addr;
Vinod Koul30931862017-12-18 10:48:05 +05302746 /* This variable ensures that descriptors are not
2747 * submitted when the DMA engine is in progress. It is
2748 * added to avoid polling for a bit in the status register to
Kedareswara rao Appana21e02a32017-12-07 10:51:02 +05302749 * know the DMA state in the driver hot path.
2750 */
2751 chan->idle = true;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302752
2753 spin_lock_init(&chan->lock);
2754 INIT_LIST_HEAD(&chan->pending_list);
2755 INIT_LIST_HEAD(&chan->done_list);
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05302756 INIT_LIST_HEAD(&chan->active_list);
Kedareswara rao Appana23059402017-12-07 10:51:04 +05302757 INIT_LIST_HEAD(&chan->free_seg_list);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302758
2759 /* Retrieve the channel properties from the device tree */
2760 has_dre = of_property_read_bool(node, "xlnx,include-dre");
2761
2762 chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");
2763
2764 err = of_property_read_u32(node, "xlnx,datawidth", &value);
2765 if (err) {
2766 dev_err(xdev->dev, "missing xlnx,datawidth property\n");
2767 return err;
2768 }
2769 width = value >> 3; /* Convert bits to bytes */
2770
2771 /* If data width is greater than 8 bytes, DRE is not in hw */
2772 if (width > 8)
2773 has_dre = false;
2774
2775 if (!has_dre)
2776 xdev->common.copy_align = fls(width - 1);
2777
Kedareswara rao Appanae131f1b2016-06-24 10:51:26 +05302778 if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
2779 of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
2780 of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302781 chan->direction = DMA_MEM_TO_DEV;
Kedareswara rao Appana1a9e7a02016-06-24 10:51:23 +05302782 chan->id = chan_id;
Radhey Shyam Pandey6ccd6922019-10-22 22:30:22 +05302783 chan->tdest = chan_id;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302784
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302785 chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302786 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302787 chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
Kedareswara rao Appanafe0503e2017-12-07 10:51:03 +05302788 chan->config.park = 1;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302789
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302790 if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
2791 xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
2792 chan->flush_on_fsync = true;
2793 }
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302794 } else if (of_device_is_compatible(node,
Kedareswara rao Appanae131f1b2016-06-24 10:51:26 +05302795 "xlnx,axi-vdma-s2mm-channel") ||
2796 of_device_is_compatible(node,
2797 "xlnx,axi-dma-s2mm-channel")) {
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302798 chan->direction = DMA_DEV_TO_MEM;
Kedareswara rao Appana1a9e7a02016-06-24 10:51:23 +05302799 chan->id = chan_id;
Radhey Shyam Pandey6ccd6922019-10-22 22:30:22 +05302800 xdev->s2mm_index = xdev->nr_channels;
2801 chan->tdest = chan_id - xdev->nr_channels;
Radhey Shyam Pandey0894aa22018-06-13 13:04:48 +05302802 chan->has_vflip = of_property_read_bool(node,
2803 "xlnx,enable-vert-flip");
2804 if (chan->has_vflip) {
2805 chan->config.vflip_en = dma_read(chan,
2806 XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP) &
2807 XILINX_VDMA_ENABLE_VERTICAL_FLIP;
2808 }
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302809
Radhey Shyam Pandey6ccd6922019-10-22 22:30:22 +05302810 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA)
2811 chan->ctrl_offset = XILINX_MCDMA_S2MM_CTRL_OFFSET;
2812 else
2813 chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
2814
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302815 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302816 chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
Kedareswara rao Appanafe0503e2017-12-07 10:51:03 +05302817 chan->config.park = 1;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302818
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302819 if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
2820 xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
2821 chan->flush_on_fsync = true;
2822 }
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302823 } else {
2824 dev_err(xdev->dev, "Invalid channel compatible node\n");
2825 return -EINVAL;
2826 }
2827
2828 /* Request the interrupt */
Radhey Shyam Pandey6ccd6922019-10-22 22:30:22 +05302829 chan->irq = irq_of_parse_and_map(node, chan->tdest);
Radhey Shyam Pandeyc2f6b672019-10-22 22:30:21 +05302830 err = request_irq(chan->irq, xdev->dma_config->irq_handler,
2831 IRQF_SHARED, "xilinx-dma-controller", chan);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302832 if (err) {
2833 dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
2834 return err;
2835 }
2836
Akinobu Mita676f9c22017-03-14 00:59:11 +09002837 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302838 chan->start_transfer = xilinx_dma_start_transfer;
Akinobu Mita676f9c22017-03-14 00:59:11 +09002839 chan->stop_transfer = xilinx_dma_stop_transfer;
Radhey Shyam Pandey6ccd6922019-10-22 22:30:22 +05302840 } else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
2841 chan->start_transfer = xilinx_mcdma_start_transfer;
2842 chan->stop_transfer = xilinx_dma_stop_transfer;
Akinobu Mita676f9c22017-03-14 00:59:11 +09002843 } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05302844 chan->start_transfer = xilinx_cdma_start_transfer;
Akinobu Mita676f9c22017-03-14 00:59:11 +09002845 chan->stop_transfer = xilinx_cdma_stop_transfer;
2846 } else {
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302847 chan->start_transfer = xilinx_vdma_start_transfer;
Akinobu Mita676f9c22017-03-14 00:59:11 +09002848 chan->stop_transfer = xilinx_dma_stop_transfer;
2849 }
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302850
Andrea Merello05f7ea72018-11-20 16:31:49 +01002851 /* Check if SG is enabled (only for AXIDMA, AXIMCDMA and CDMA) */
2852 if (xdev->dma_config->dmatype != XDMA_TYPE_VDMA) {
2853 if (dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
2854 XILINX_DMA_DMASR_SG_MASK)
2855 chan->has_sg = true;
2856 dev_dbg(chan->dev, "ch %d: SG %s\n", chan->id,
2857 chan->has_sg ? "enabled" : "disabled");
2858 }
2859
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302860 /* Initialize the tasklet */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302861 tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302862 (unsigned long)chan);
2863
2864 /*
2865 * Initialize the DMA channel and add it to the DMA engine channels
2866 * list.
2867 */
2868 chan->common.device = &xdev->common;
2869
2870 list_add_tail(&chan->common.device_node, &xdev->common.channels);
2871 xdev->chan[chan->id] = chan;
2872
2873 /* Reset the channel */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302874 err = xilinx_dma_chan_reset(chan);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302875 if (err < 0) {
2876 dev_err(xdev->dev, "Reset channel failed\n");
2877 return err;
2878 }
2879
2880 return 0;
2881}
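
/*
 * Example (editor's sketch): xilinx_dma_chan_probe() reads its channel
 * properties from a per-channel child node. A hypothetical MM2S VDMA
 * channel could look roughly like the fragment below; the interrupt
 * specifier and data width are placeholders taken from a typical design.
 *
 *	dma-channel@40030000 {
 *		compatible = "xlnx,axi-vdma-mm2s-channel";
 *		interrupts = <0 59 4>;
 *		xlnx,datawidth = <0x40>;
 *		xlnx,include-dre;
 *		xlnx,genlock-mode;
 *	};
 */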
2882
2883/**
Kedareswara rao Appana1a9e7a02016-06-24 10:51:23 +05302884 * xilinx_dma_child_probe - Per child node probe
2885 * It gets the number of dma-channels per child node from
2886 * the device tree and initializes all the channels.
2887 *
2888 * @xdev: Driver specific device structure
2889 * @node: Device node
2890 *
2891 * Return: 0 always.
2892 */
2893static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
Kedareswara rao Appana22653af2017-12-07 10:51:06 +05302894 struct device_node *node)
2895{
Radhey Shyam Pandey6ccd6922019-10-22 22:30:22 +05302896 int ret, i, nr_channels = 1;
2897
2898 ret = of_property_read_u32(node, "dma-channels", &nr_channels);
2899 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA && ret < 0)
2900 dev_warn(xdev->dev, "missing dma-channels property\n");
Kedareswara rao Appana1a9e7a02016-06-24 10:51:23 +05302901
2902 for (i = 0; i < nr_channels; i++)
2903 xilinx_dma_chan_probe(xdev, node, xdev->chan_id++);
2904
2905 xdev->nr_channels += nr_channels;
2906
2907 return 0;
2908}
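
/*
 * Example (editor's note): for the AXI MCDMA case the child node is also
 * expected to advertise how many hardware channels it provides, e.g. a
 * hypothetical
 *
 *	dma-channels = <0x2>;
 *
 * which makes xilinx_dma_child_probe() instantiate one xilinx_dma_chan
 * per hardware channel; without it the count defaults to 1.
 */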
2909
2910/**
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302911 * of_dma_xilinx_xlate - Translation function
2912 * @dma_spec: Pointer to DMA specifier as found in the device tree
2913 * @ofdma: Pointer to DMA controller data
2914 *
2915 * Return: DMA channel pointer on success and NULL on error
2916 */
2917static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
2918 struct of_dma *ofdma)
2919{
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302920 struct xilinx_dma_device *xdev = ofdma->of_dma_data;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302921 int chan_id = dma_spec->args[0];
2922
Kedareswara rao Appana1a9e7a02016-06-24 10:51:23 +05302923 if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id])
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302924 return NULL;
2925
2926 return dma_get_slave_channel(&xdev->chan[chan_id]->common);
2927}
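
/*
 * Example (editor's sketch): of_dma_xilinx_xlate() consumes a single-cell
 * specifier holding the channel id, so a hypothetical client node would
 * reference the MM2S and S2MM channels along these lines:
 *
 *	display@0 {
 *		dmas = <&axi_vdma_0 0>, <&axi_vdma_0 1>;
 *		dma-names = "tx", "rx";
 *	};
 *
 * and the client driver would then obtain a channel with something like
 * dma_request_chan(dev, "tx").
 */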
2928
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302929static const struct xilinx_dma_config axidma_config = {
2930 .dmatype = XDMA_TYPE_AXIDMA,
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302931 .clk_init = axidma_clk_init,
Radhey Shyam Pandeyc2f6b672019-10-22 22:30:21 +05302932 .irq_handler = xilinx_dma_irq_handler,
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302933};
2934
Radhey Shyam Pandey6ccd6922019-10-22 22:30:22 +05302935static const struct xilinx_dma_config aximcdma_config = {
2936 .dmatype = XDMA_TYPE_AXIMCDMA,
2937 .clk_init = axidma_clk_init,
2938 .irq_handler = xilinx_mcdma_irq_handler,
2939};

Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302940static const struct xilinx_dma_config axicdma_config = {
2941 .dmatype = XDMA_TYPE_CDMA,
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302942 .clk_init = axicdma_clk_init,
Radhey Shyam Pandeyc2f6b672019-10-22 22:30:21 +05302943 .irq_handler = xilinx_dma_irq_handler,
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302944};
2945
2946static const struct xilinx_dma_config axivdma_config = {
2947 .dmatype = XDMA_TYPE_VDMA,
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302948 .clk_init = axivdma_clk_init,
Radhey Shyam Pandeyc2f6b672019-10-22 22:30:21 +05302949 .irq_handler = xilinx_dma_irq_handler,
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302950};
2951
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302952static const struct of_device_id xilinx_dma_of_ids[] = {
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302953 { .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
2954 { .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
2955 { .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
Radhey Shyam Pandey6ccd6922019-10-22 22:30:22 +05302956 { .compatible = "xlnx,axi-mcdma-1.00.a", .data = &aximcdma_config },
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302957 {}
2958};
2959MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
2960
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302961/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302962 * xilinx_dma_probe - Driver probe function
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302963 * @pdev: Pointer to the platform_device structure
2964 *
2965 * Return: '0' on success and failure value on error
2966 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302967static int xilinx_dma_probe(struct platform_device *pdev)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302968{
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302969 int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
2970 struct clk **, struct clk **, struct clk **)
2971 = axivdma_clk_init;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302972 struct device_node *node = pdev->dev.of_node;
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302973 struct xilinx_dma_device *xdev;
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302974 struct device_node *child, *np = pdev->dev.of_node;
Radhey Shyam Pandeyae809692018-11-20 16:31:48 +01002975 u32 num_frames, addr_width, len_width;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302976 int i, err;
2977
2978 /* Allocate and initialize the DMA engine structure */
2979 xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
2980 if (!xdev)
2981 return -ENOMEM;
2982
2983 xdev->dev = &pdev->dev;
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302984 if (np) {
2985 const struct of_device_id *match;
2986
2987 match = of_match_node(xilinx_dma_of_ids, np);
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302988 if (match && match->data) {
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302989 xdev->dma_config = match->data;
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302990 clk_init = xdev->dma_config->clk_init;
2991 }
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302992 }
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302993
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302994 err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
2995 &xdev->rx_clk, &xdev->rxs_clk);
2996 if (err)
2997 return err;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302998
2999 /* Request and map I/O memory */
Radhey Shyam Pandeya8bd4752019-09-26 16:20:59 +05303000 xdev->regs = devm_platform_ioremap_resource(pdev, 0);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05303001 if (IS_ERR(xdev->regs))
3002 return PTR_ERR(xdev->regs);
3003
3004 /* Retrieve the DMA engine properties from the device tree */
Radhey Shyam Pandeyae809692018-11-20 16:31:48 +01003005 xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
Andrea Merello616f0f82018-11-20 16:31:45 +01003006
Radhey Shyam Pandey6ccd6922019-10-22 22:30:22 +05303007 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA ||
3008 xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
Radhey Shyam Pandeyae809692018-11-20 16:31:48 +01003009 if (!of_property_read_u32(node, "xlnx,sg-length-width",
3010 &len_width)) {
3011 if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
3012 len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX) {
3013 dev_warn(xdev->dev,
3014 "invalid xlnx,sg-length-width property value. Using default width\n");
3015 } else {
3016 if (len_width > XILINX_DMA_MAX_TRANS_LEN_MAX)
3017 dev_warn(xdev->dev, "Please ensure that IP supports buffer length > 23 bits\n");
3018 xdev->max_buffer_len =
3019 GENMASK(len_width - 1, 0);
3020 }
3021 }
3022 }
Srikanth Thokala9cd43602014-04-23 20:23:26 +05303023
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05303024 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05303025 err = of_property_read_u32(node, "xlnx,num-fstores",
3026 &num_frames);
3027 if (err < 0) {
3028 dev_err(xdev->dev,
3029 "missing xlnx,num-fstores property\n");
3030 return err;
3031 }
Srikanth Thokala9cd43602014-04-23 20:23:26 +05303032
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05303033 err = of_property_read_u32(node, "xlnx,flush-fsync",
3034 &xdev->flush_on_fsync);
3035 if (err < 0)
3036 dev_warn(xdev->dev,
3037 "missing xlnx,flush-fsync property\n");
Srikanth Thokala9cd43602014-04-23 20:23:26 +05303038 }
3039
Kedareswara rao Appanab72db402016-04-06 10:38:08 +05303040 err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05303041 if (err < 0)
Kedareswara rao Appanab72db402016-04-06 10:38:08 +05303042 dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");
3043
3044 if (addr_width > 32)
3045 xdev->ext_addr = true;
3046 else
3047 xdev->ext_addr = false;
3048
3049 /* Set the dma mask bits */
3050 dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width));
Srikanth Thokala9cd43602014-04-23 20:23:26 +05303051
3052 /* Initialize the DMA engine */
3053 xdev->common.dev = &pdev->dev;
3054
3055 INIT_LIST_HEAD(&xdev->common.channels);
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05303056 if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) {
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05303057 dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
3058 dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
3059 }
Srikanth Thokala9cd43602014-04-23 20:23:26 +05303060
3061 xdev->common.device_alloc_chan_resources =
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05303062 xilinx_dma_alloc_chan_resources;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05303063 xdev->common.device_free_chan_resources =
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05303064 xilinx_dma_free_chan_resources;
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05303065 xdev->common.device_terminate_all = xilinx_dma_terminate_all;
3066 xdev->common.device_tx_status = xilinx_dma_tx_status;
3067 xdev->common.device_issue_pending = xilinx_dma_issue_pending;
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05303068 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05303069 dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05303070 xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05303071 xdev->common.device_prep_dma_cyclic =
3072 xilinx_dma_prep_dma_cyclic;
Nicholas Graumanna575d0b2019-10-15 20:18:21 +05303073 /* Residue calculation is supported only by AXI DMA and CDMA */
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05303074 xdev->common.residue_granularity =
3075 DMA_RESIDUE_GRANULARITY_SEGMENT;
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05303076 } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05303077 dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
3078 xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
Nicholas Graumanna575d0b2019-10-15 20:18:21 +05303079 /* Residue calculation is supported only by AXI DMA and CDMA */
3080 xdev->common.residue_granularity =
Radhey Shyam Pandey6ccd6922019-10-22 22:30:22 +05303081 DMA_RESIDUE_GRANULARITY_SEGMENT;
3082 } else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
3083 xdev->common.device_prep_slave_sg = xilinx_mcdma_prep_slave_sg;
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05303084 } else {
3085 xdev->common.device_prep_interleaved_dma =
Srikanth Thokala9cd43602014-04-23 20:23:26 +05303086 xilinx_vdma_dma_prep_interleaved;
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05303087 }
Srikanth Thokala9cd43602014-04-23 20:23:26 +05303088
3089 platform_set_drvdata(pdev, xdev);
3090
3091 /* Initialize the channels */
3092 for_each_child_of_node(node, child) {
Kedareswara rao Appana1a9e7a02016-06-24 10:51:23 +05303093 err = xilinx_dma_child_probe(xdev, child);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05303094 if (err < 0)
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05303095 goto disable_clks;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05303096 }
3097
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05303098 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
Kedareswara rao Appana1a9e7a02016-06-24 10:51:23 +05303099 for (i = 0; i < xdev->nr_channels; i++)
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05303100 if (xdev->chan[i])
3101 xdev->chan[i]->num_frms = num_frames;
3102 }
Srikanth Thokala9cd43602014-04-23 20:23:26 +05303103
3104 /* Register the DMA engine with the core */
3105 dma_async_device_register(&xdev->common);
3106
3107 err = of_dma_controller_register(node, of_dma_xilinx_xlate,
3108 xdev);
3109 if (err < 0) {
3110 dev_err(&pdev->dev, "Unable to register DMA to DT\n");
3111 dma_async_device_unregister(&xdev->common);
3112 goto error;
3113 }
3114
Kedareswara rao Appanac7a03592017-12-07 10:51:07 +05303115 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
3116 dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n");
3117 else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
3118 dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n");
Radhey Shyam Pandey6ccd6922019-10-22 22:30:22 +05303119 else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA)
3120 dev_info(&pdev->dev, "Xilinx AXI MCDMA Engine Driver Probed!!\n");
Kedareswara rao Appanac7a03592017-12-07 10:51:07 +05303121 else
3122 dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");
Srikanth Thokala9cd43602014-04-23 20:23:26 +05303123
3124 return 0;
3125
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05303126disable_clks:
3127 xdma_disable_allclks(xdev);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05303128error:
Kedareswara rao Appana1a9e7a02016-06-24 10:51:23 +05303129 for (i = 0; i < xdev->nr_channels; i++)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05303130 if (xdev->chan[i])
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05303131 xilinx_dma_chan_remove(xdev->chan[i]);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05303132
3133 return err;
3134}
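
/*
 * Example (editor's sketch): top-level properties parsed by
 * xilinx_dma_probe() above. A hypothetical AXI VDMA instance with a 64-bit
 * address space and three frame stores might carry the following (values
 * are placeholders; AXI DMA/MCDMA nodes would instead use properties such
 * as xlnx,sg-length-width):
 *
 *	axi_vdma_0: dma@43000000 {
 *		compatible = "xlnx,axi-vdma-1.00.a";
 *		#dma-cells = <1>;
 *		xlnx,addrwidth = <0x40>;
 *		xlnx,num-fstores = <0x3>;
 *		xlnx,flush-fsync = <0x1>;
 *		...
 *	};
 *
 * #dma-cells = <1> matches the single-cell specifier expected by
 * of_dma_xilinx_xlate() above.
 */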
3135
3136/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05303137 * xilinx_dma_remove - Driver remove function
Srikanth Thokala9cd43602014-04-23 20:23:26 +05303138 * @pdev: Pointer to the platform_device structure
3139 *
3140 * Return: Always '0'
3141 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05303142static int xilinx_dma_remove(struct platform_device *pdev)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05303143{
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05303144 struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05303145 int i;
3146
3147 of_dma_controller_free(pdev->dev.of_node);
3148
3149 dma_async_device_unregister(&xdev->common);
3150
Kedareswara rao Appana1a9e7a02016-06-24 10:51:23 +05303151 for (i = 0; i < xdev->nr_channels; i++)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05303152 if (xdev->chan[i])
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05303153 xilinx_dma_chan_remove(xdev->chan[i]);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05303154
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05303155 xdma_disable_allclks(xdev);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05303156
3157 return 0;
3158}
3159
Srikanth Thokala9cd43602014-04-23 20:23:26 +05303160static struct platform_driver xilinx_vdma_driver = {
3161 .driver = {
3162 .name = "xilinx-vdma",
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05303163 .of_match_table = xilinx_dma_of_ids,
Srikanth Thokala9cd43602014-04-23 20:23:26 +05303164 },
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05303165 .probe = xilinx_dma_probe,
3166 .remove = xilinx_dma_remove,
Srikanth Thokala9cd43602014-04-23 20:23:26 +05303167};
3168
3169module_platform_driver(xilinx_vdma_driver);
3170
3171MODULE_AUTHOR("Xilinx, Inc.");
3172MODULE_DESCRIPTION("Xilinx VDMA driver");
3173MODULE_LICENSE("GPL v2");