// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMA driver for Xilinx Video DMA Engine
 *
 * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
 *
 * Based on the Freescale DMA driver.
 *
 * Description:
 * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
 * core that provides high-bandwidth direct memory access between memory
 * and AXI4-Stream type video target peripherals. The core provides efficient
 * two dimensional DMA operations with independent asynchronous read (MM2S)
 * and write (S2MM) channel operation. It can be configured to have either
 * one channel or two channels. If configured as two channels, one is to
 * transmit to the video device (MM2S) and another is to receive from the
 * video device (S2MM). Initialization, status, interrupt and management
 * registers are accessed through an AXI4-Lite slave interface.
 *
 * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that
 * provides high-bandwidth one dimensional direct memory access between memory
 * and AXI4-Stream target peripherals. It supports one receive and one
 * transmit channel, both of them optional at synthesis time.
 *
 * The AXI CDMA is a soft IP which provides high-bandwidth Direct Memory
 * Access (DMA) between a memory-mapped source address and a memory-mapped
 * destination address.
 */
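
/*
 * Illustrative only (not part of this driver): a slave DMA client typically
 * reaches one of these channels through the generic dmaengine API, roughly:
 *
 *	chan = dma_request_chan(dev, "rx");
 *	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM, 0);
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *
 * The channel name "rx" is an assumption and depends on the board's device
 * tree bindings. VDMA clients additionally describe frame geometry through
 * xilinx_vdma_channel_set_config() from <linux/dma/xilinx_dma.h>.
 */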

#include <linux/bitops.h>
#include <linux/dmapool.h>
#include <linux/dma/xilinx_dma.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "../dmaengine.h"

/* Register/Descriptor Offsets */
#define XILINX_DMA_MM2S_CTRL_OFFSET		0x0000
#define XILINX_DMA_S2MM_CTRL_OFFSET		0x0030
#define XILINX_VDMA_MM2S_DESC_OFFSET		0x0050
#define XILINX_VDMA_S2MM_DESC_OFFSET		0x00a0

/* Control Registers */
#define XILINX_DMA_REG_DMACR			0x0000
#define XILINX_DMA_DMACR_DELAY_MAX		0xff
#define XILINX_DMA_DMACR_DELAY_SHIFT		24
#define XILINX_DMA_DMACR_FRAME_COUNT_MAX	0xff
#define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT	16
#define XILINX_DMA_DMACR_ERR_IRQ		BIT(14)
#define XILINX_DMA_DMACR_DLY_CNT_IRQ		BIT(13)
#define XILINX_DMA_DMACR_FRM_CNT_IRQ		BIT(12)
#define XILINX_DMA_DMACR_MASTER_SHIFT		8
#define XILINX_DMA_DMACR_FSYNCSRC_SHIFT		5
#define XILINX_DMA_DMACR_FRAMECNT_EN		BIT(4)
#define XILINX_DMA_DMACR_GENLOCK_EN		BIT(3)
#define XILINX_DMA_DMACR_RESET			BIT(2)
#define XILINX_DMA_DMACR_CIRC_EN		BIT(1)
#define XILINX_DMA_DMACR_RUNSTOP		BIT(0)
#define XILINX_DMA_DMACR_FSYNCSRC_MASK		GENMASK(6, 5)

#define XILINX_DMA_REG_DMASR			0x0004
#define XILINX_DMA_DMASR_EOL_LATE_ERR		BIT(15)
#define XILINX_DMA_DMASR_ERR_IRQ		BIT(14)
#define XILINX_DMA_DMASR_DLY_CNT_IRQ		BIT(13)
#define XILINX_DMA_DMASR_FRM_CNT_IRQ		BIT(12)
#define XILINX_DMA_DMASR_SOF_LATE_ERR		BIT(11)
#define XILINX_DMA_DMASR_SG_DEC_ERR		BIT(10)
#define XILINX_DMA_DMASR_SG_SLV_ERR		BIT(9)
#define XILINX_DMA_DMASR_EOF_EARLY_ERR		BIT(8)
#define XILINX_DMA_DMASR_SOF_EARLY_ERR		BIT(7)
#define XILINX_DMA_DMASR_DMA_DEC_ERR		BIT(6)
#define XILINX_DMA_DMASR_DMA_SLAVE_ERR		BIT(5)
#define XILINX_DMA_DMASR_DMA_INT_ERR		BIT(4)
#define XILINX_DMA_DMASR_SG_MASK		BIT(3)
#define XILINX_DMA_DMASR_IDLE			BIT(1)
#define XILINX_DMA_DMASR_HALTED			BIT(0)
#define XILINX_DMA_DMASR_DELAY_MASK		GENMASK(31, 24)
#define XILINX_DMA_DMASR_FRAME_COUNT_MASK	GENMASK(23, 16)

#define XILINX_DMA_REG_CURDESC			0x0008
#define XILINX_DMA_REG_TAILDESC			0x0010
#define XILINX_DMA_REG_REG_INDEX		0x0014
#define XILINX_DMA_REG_FRMSTORE			0x0018
#define XILINX_DMA_REG_THRESHOLD		0x001c
#define XILINX_DMA_REG_FRMPTR_STS		0x0024
#define XILINX_DMA_REG_PARK_PTR			0x0028
#define XILINX_DMA_PARK_PTR_WR_REF_SHIFT	8
#define XILINX_DMA_PARK_PTR_WR_REF_MASK		GENMASK(12, 8)
#define XILINX_DMA_PARK_PTR_RD_REF_SHIFT	0
#define XILINX_DMA_PARK_PTR_RD_REF_MASK		GENMASK(4, 0)
#define XILINX_DMA_REG_VDMA_VERSION		0x002c

/* Register Direct Mode Registers */
#define XILINX_DMA_REG_VSIZE			0x0000
#define XILINX_DMA_REG_HSIZE			0x0004

#define XILINX_DMA_REG_FRMDLY_STRIDE		0x0008
#define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT	24
#define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT	0

#define XILINX_VDMA_REG_START_ADDRESS(n)	(0x000c + 4 * (n))
#define XILINX_VDMA_REG_START_ADDRESS_64(n)	(0x000c + 8 * (n))

#define XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP	0x00ec
#define XILINX_VDMA_ENABLE_VERTICAL_FLIP	BIT(0)

/* HW specific definitions */
#define XILINX_DMA_MAX_CHANS_PER_DEVICE		0x2

#define XILINX_DMA_DMAXR_ALL_IRQ_MASK	\
		(XILINX_DMA_DMASR_FRM_CNT_IRQ | \
		 XILINX_DMA_DMASR_DLY_CNT_IRQ | \
		 XILINX_DMA_DMASR_ERR_IRQ)

#define XILINX_DMA_DMASR_ALL_ERR_MASK	\
		(XILINX_DMA_DMASR_EOL_LATE_ERR | \
		 XILINX_DMA_DMASR_SOF_LATE_ERR | \
		 XILINX_DMA_DMASR_SG_DEC_ERR | \
		 XILINX_DMA_DMASR_SG_SLV_ERR | \
		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_DMA_DEC_ERR | \
		 XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
		 XILINX_DMA_DMASR_DMA_INT_ERR)

/*
 * Recoverable errors are DMA Internal error, SOF Early, EOF Early
 * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
 * is enabled in the h/w system.
 */
#define XILINX_DMA_DMASR_ERR_RECOVER_MASK	\
		(XILINX_DMA_DMASR_SOF_LATE_ERR | \
		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_DMA_INT_ERR)

/* Axi VDMA Flush on Fsync bits */
#define XILINX_DMA_FLUSH_S2MM		3
#define XILINX_DMA_FLUSH_MM2S		2
#define XILINX_DMA_FLUSH_BOTH		1

/* Delay loop counter to prevent hardware failure */
#define XILINX_DMA_LOOP_COUNT		1000000

/* AXI DMA Specific Registers/Offsets */
#define XILINX_DMA_REG_SRCDSTADDR	0x18
#define XILINX_DMA_REG_BTT		0x28

/* AXI DMA Specific Masks/Bit fields */
#define XILINX_DMA_MAX_TRANS_LEN_MIN	8
#define XILINX_DMA_MAX_TRANS_LEN_MAX	23
#define XILINX_DMA_V2_MAX_TRANS_LEN_MAX	26
#define XILINX_DMA_CR_COALESCE_MAX	GENMASK(23, 16)
#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK	BIT(4)
#define XILINX_DMA_CR_COALESCE_SHIFT	16
#define XILINX_DMA_BD_SOP		BIT(27)
#define XILINX_DMA_BD_EOP		BIT(26)
#define XILINX_DMA_COALESCE_MAX		255
#define XILINX_DMA_NUM_DESCS		255
#define XILINX_DMA_NUM_APP_WORDS	5

/* AXI CDMA Specific Registers/Offsets */
#define XILINX_CDMA_REG_SRCADDR		0x18
#define XILINX_CDMA_REG_DSTADDR		0x20

/* AXI CDMA Specific Masks */
#define XILINX_CDMA_CR_SGMODE		BIT(3)

#define xilinx_prep_dma_addr_t(addr)	\
	((dma_addr_t)((u64)addr##_##msb << 32 | (addr)))
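
/*
 * Illustrative expansion (not a definition used below): for a descriptor hw
 * block with fields src_addr and src_addr_msb,
 *
 *	xilinx_prep_dma_addr_t(hw.src_addr)
 *
 * expands to ((dma_addr_t)((u64)hw.src_addr_msb << 32 | (hw.src_addr))),
 * i.e. it glues the two 32-bit halves back together into one dma_addr_t.
 */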
/**
 * struct xilinx_vdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @pad1: Reserved @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @vsize: Vertical Size @0x10
 * @hsize: Horizontal Size @0x14
 * @stride: Number of bytes between the first
 *	    pixels of each horizontal line @0x18
 */
struct xilinx_vdma_desc_hw {
	u32 next_desc;
	u32 pad1;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 vsize;
	u32 hsize;
	u32 stride;
} __aligned(64);

/**
 * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @reserved1: Reserved @0x10
 * @reserved2: Reserved @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 * @app: APP Fields @0x20 - 0x30
 */
struct xilinx_axidma_desc_hw {
	u32 next_desc;
	u32 next_desc_msb;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 reserved1;
	u32 reserved2;
	u32 control;
	u32 status;
	u32 app[XILINX_DMA_NUM_APP_WORDS];
} __aligned(64);

/**
 * struct xilinx_cdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: Next Descriptor Pointer MSB @0x04
 * @src_addr: Source address @0x08
 * @src_addr_msb: Source address MSB @0x0C
 * @dest_addr: Destination address @0x10
 * @dest_addr_msb: Destination address MSB @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 */
struct xilinx_cdma_desc_hw {
	u32 next_desc;
	u32 next_desc_msb;
	u32 src_addr;
	u32 src_addr_msb;
	u32 dest_addr;
	u32 dest_addr_msb;
	u32 control;
	u32 status;
} __aligned(64);

/**
 * struct xilinx_vdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_vdma_tx_segment {
	struct xilinx_vdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_axidma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_axidma_tx_segment {
	struct xilinx_axidma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_cdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_cdma_tx_segment {
	struct xilinx_cdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_dma_tx_descriptor - Per Transaction structure
 * @async_tx: Async transaction descriptor
 * @segments: TX segments list
 * @node: Node in the channel descriptors list
 * @cyclic: Check for cyclic transfers.
 * @err: Whether the descriptor has an error.
 * @residue: Residue of the completed descriptor
 */
struct xilinx_dma_tx_descriptor {
	struct dma_async_tx_descriptor async_tx;
	struct list_head segments;
	struct list_head node;
	bool cyclic;
	bool err;
	u32 residue;
};

/**
 * struct xilinx_dma_chan - Driver specific DMA channel structure
 * @xdev: Driver specific device structure
 * @ctrl_offset: Control registers offset
 * @desc_offset: TX descriptor registers offset
 * @lock: Descriptor operation lock
 * @pending_list: Descriptors waiting
 * @active_list: Descriptors ready to submit
 * @done_list: Complete descriptors
 * @free_seg_list: Free descriptors
 * @common: DMA common channel
 * @desc_pool: Descriptors pool
 * @dev: The dma device
 * @irq: Channel IRQ
 * @id: Channel ID
 * @direction: Transfer direction
 * @num_frms: Number of frames
 * @has_sg: Support scatter transfers
 * @cyclic: Check for cyclic transfers.
 * @genlock: Support genlock mode
 * @err: Channel has errors
 * @idle: Check for channel idle
 * @tasklet: Cleanup work after irq
 * @config: Device configuration info
 * @flush_on_fsync: Flush on Frame sync
 * @desc_pendingcount: Descriptor pending count
 * @ext_addr: Indicates 64 bit addressing is supported by dma channel
 * @desc_submitcount: Descriptor h/w submitted count
 * @seg_v: Statically allocated segments base
 * @seg_p: Physical allocated segments base
 * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
 * @cyclic_seg_p: Physical allocated segments base for cyclic dma
 * @start_transfer: Differentiate b/w DMA IP's transfer
 * @stop_transfer: Differentiate b/w DMA IP's quiesce
 * @has_vflip: S2MM vertical flip
 */
struct xilinx_dma_chan {
	struct xilinx_dma_device *xdev;
	u32 ctrl_offset;
	u32 desc_offset;
	spinlock_t lock;
	struct list_head pending_list;
	struct list_head active_list;
	struct list_head done_list;
	struct list_head free_seg_list;
	struct dma_chan common;
	struct dma_pool *desc_pool;
	struct device *dev;
	int irq;
	int id;
	enum dma_transfer_direction direction;
	int num_frms;
	bool has_sg;
	bool cyclic;
	bool genlock;
	bool err;
	bool idle;
	struct tasklet_struct tasklet;
	struct xilinx_vdma_config config;
	bool flush_on_fsync;
	u32 desc_pendingcount;
	bool ext_addr;
	u32 desc_submitcount;
	struct xilinx_axidma_tx_segment *seg_v;
	dma_addr_t seg_p;
	struct xilinx_axidma_tx_segment *cyclic_seg_v;
	dma_addr_t cyclic_seg_p;
	void (*start_transfer)(struct xilinx_dma_chan *chan);
	int (*stop_transfer)(struct xilinx_dma_chan *chan);
	bool has_vflip;
};

/**
 * enum xdma_ip_type - DMA IP type.
 *
 * @XDMA_TYPE_AXIDMA: Axi dma ip.
 * @XDMA_TYPE_CDMA: Axi cdma ip.
 * @XDMA_TYPE_VDMA: Axi vdma ip.
 *
 */
enum xdma_ip_type {
	XDMA_TYPE_AXIDMA = 0,
	XDMA_TYPE_CDMA,
	XDMA_TYPE_VDMA,
};

struct xilinx_dma_config {
	enum xdma_ip_type dmatype;
	int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
			struct clk **tx_clk, struct clk **txs_clk,
			struct clk **rx_clk, struct clk **rxs_clk);
};

/**
 * struct xilinx_dma_device - DMA device structure
 * @regs: I/O mapped base address
 * @dev: Device Structure
 * @common: DMA device structure
 * @chan: Driver specific DMA channel
 * @flush_on_fsync: Flush on frame sync
 * @ext_addr: Indicates 64 bit addressing is supported by dma device
 * @pdev: Platform device structure pointer
 * @dma_config: DMA config structure
 * @axi_clk: DMA Axi4-lite interface clock
 * @tx_clk: DMA mm2s clock
 * @txs_clk: DMA mm2s stream clock
 * @rx_clk: DMA s2mm clock
 * @rxs_clk: DMA s2mm stream clock
 * @nr_channels: Number of channels DMA device supports
 * @chan_id: DMA channel identifier
 * @max_buffer_len: Max buffer length
 */
struct xilinx_dma_device {
	void __iomem *regs;
	struct device *dev;
	struct dma_device common;
	struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
	u32 flush_on_fsync;
	bool ext_addr;
	struct platform_device *pdev;
	const struct xilinx_dma_config *dma_config;
	struct clk *axi_clk;
	struct clk *tx_clk;
	struct clk *txs_clk;
	struct clk *rx_clk;
	struct clk *rxs_clk;
	u32 nr_channels;
	u32 chan_id;
	u32 max_buffer_len;
};

/* Macros */
#define to_xilinx_chan(chan) \
	container_of(chan, struct xilinx_dma_chan, common)
#define to_dma_tx_descriptor(tx) \
	container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
#define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
	readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
			   cond, delay_us, timeout_us)

/* IO accessors */
static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
{
	return ioread32(chan->xdev->regs + reg);
}

static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
{
	iowrite32(value, chan->xdev->regs + reg);
}

static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
				   u32 value)
{
	dma_write(chan, chan->desc_offset + reg, value);
}

static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
{
	return dma_read(chan, chan->ctrl_offset + reg);
}

static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
				  u32 value)
{
	dma_write(chan, chan->ctrl_offset + reg, value);
}

static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
				u32 clr)
{
	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
}

static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
				u32 set)
{
	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
}

/**
 * vdma_desc_write_64 - 64-bit descriptor write
 * @chan: Driver specific VDMA channel
 * @reg: Register to write
 * @value_lsb: lower address of the descriptor.
 * @value_msb: upper address of the descriptor.
 *
 * Since vdma driver is trying to write to a register offset which is not a
 * multiple of 64 bits (ex: 0x5c), we are writing as two separate 32 bits
 * instead of a single 64 bit register write.
 */
static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
				      u32 value_lsb, u32 value_msb)
{
	/* Write the lsb 32 bits*/
	writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);

	/* Write the msb 32 bits */
	writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
}
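
/*
 * Example (illustrative): XILINX_VDMA_REG_START_ADDRESS_64(0) is descriptor
 * offset 0x0c, which is only 32-bit aligned, so xilinx_vdma_start_transfer()
 * below programs each frame buffer as
 *
 *	vdma_desc_write_64(chan, XILINX_VDMA_REG_START_ADDRESS_64(i),
 *			   segment->hw.buf_addr, segment->hw.buf_addr_msb);
 *
 * i.e. two 32-bit halves rather than one 64-bit store.
 */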

static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
{
	lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg);
}

static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg,
				dma_addr_t addr)
{
	if (chan->ext_addr)
		dma_writeq(chan, reg, addr);
	else
		dma_ctrl_write(chan, reg, addr);
}

static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
				     struct xilinx_axidma_desc_hw *hw,
				     dma_addr_t buf_addr, size_t sg_used,
				     size_t period_len)
{
	if (chan->ext_addr) {
		hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len);
		hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used +
						 period_len);
	} else {
		hw->buf_addr = buf_addr + sg_used + period_len;
	}
}
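
/*
 * Illustrative example: with ext_addr set and buf_addr = 0x123456000,
 * sg_used = 0x1000 and period_len = 0, xilinx_axidma_buf() stores
 * hw->buf_addr = 0x23457000 and hw->buf_addr_msb = 0x1; without ext_addr
 * only the low word is written, so the address must fit in 32 bits.
 */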

/* -----------------------------------------------------------------------------
 * Descriptors and segments alloc and free
 */

/**
 * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_vdma_tx_segment *
xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_vdma_tx_segment *segment;
	dma_addr_t phys;

	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	segment->phys = phys;

	return segment;
}

/**
 * xilinx_cdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_cdma_tx_segment *
xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_cdma_tx_segment *segment;
	dma_addr_t phys;

	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	segment->phys = phys;

	return segment;
}

/**
 * xilinx_axidma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_axidma_tx_segment *
xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_axidma_tx_segment *segment = NULL;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (!list_empty(&chan->free_seg_list)) {
		segment = list_first_entry(&chan->free_seg_list,
					   struct xilinx_axidma_tx_segment,
					   node);
		list_del(&segment->node);
	}
	spin_unlock_irqrestore(&chan->lock, flags);

	if (!segment)
		dev_dbg(chan->dev, "Could not find free tx segment\n");

	return segment;
}

static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw)
{
	u32 next_desc = hw->next_desc;
	u32 next_desc_msb = hw->next_desc_msb;

	memset(hw, 0, sizeof(struct xilinx_axidma_desc_hw));

	hw->next_desc = next_desc;
	hw->next_desc_msb = next_desc_msb;
}
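
/*
 * Note: for AXI DMA the buffer descriptors live in one coherent ring that is
 * pre-linked at channel-allocation time (see xilinx_dma_alloc_chan_resources()
 * below), so "cleaning" a descriptor only wipes its payload fields while
 * keeping next_desc/next_desc_msb intact and the ring chained.
 */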

/**
 * xilinx_dma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
				       struct xilinx_axidma_tx_segment *segment)
{
	xilinx_dma_clean_hw_desc(&segment->hw);

	list_add_tail(&segment->node, &chan->free_seg_list);
}

/**
 * xilinx_cdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
					struct xilinx_cdma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}

/**
 * xilinx_vdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
					struct xilinx_vdma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}

/**
 * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated descriptor on success and NULL on failure.
 */
static struct xilinx_dma_tx_descriptor *
xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return NULL;

	INIT_LIST_HEAD(&desc->segments);

	return desc;
}

/**
 * xilinx_dma_free_tx_descriptor - Free transaction descriptor
 * @chan: Driver specific DMA channel
 * @desc: DMA transaction descriptor
 */
static void
xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
			      struct xilinx_dma_tx_descriptor *desc)
{
	struct xilinx_vdma_tx_segment *segment, *next;
	struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
	struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;

	if (!desc)
		return;

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		list_for_each_entry_safe(segment, next, &desc->segments, node) {
			list_del(&segment->node);
			xilinx_vdma_free_tx_segment(chan, segment);
		}
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		list_for_each_entry_safe(cdma_segment, cdma_next,
					 &desc->segments, node) {
			list_del(&cdma_segment->node);
			xilinx_cdma_free_tx_segment(chan, cdma_segment);
		}
	} else {
		list_for_each_entry_safe(axidma_segment, axidma_next,
					 &desc->segments, node) {
			list_del(&axidma_segment->node);
			xilinx_dma_free_tx_segment(chan, axidma_segment);
		}
	}

	kfree(desc);
}

/* Required functions */

/**
 * xilinx_dma_free_desc_list - Free descriptors list
 * @chan: Driver specific DMA channel
 * @list: List to parse and delete the descriptor
 */
static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
				      struct list_head *list)
{
	struct xilinx_dma_tx_descriptor *desc, *next;

	list_for_each_entry_safe(desc, next, list, node) {
		list_del(&desc->node);
		xilinx_dma_free_tx_descriptor(chan, desc);
	}
}

/**
 * xilinx_dma_free_descriptors - Free channel descriptors
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	xilinx_dma_free_desc_list(chan, &chan->pending_list);
	xilinx_dma_free_desc_list(chan, &chan->done_list);
	xilinx_dma_free_desc_list(chan, &chan->active_list);

	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_dma_free_chan_resources - Free channel resources
 * @dchan: DMA channel
 */
static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	unsigned long flags;

	dev_dbg(chan->dev, "Free all channel resources.\n");

	xilinx_dma_free_descriptors(chan);

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		spin_lock_irqsave(&chan->lock, flags);
		INIT_LIST_HEAD(&chan->free_seg_list);
		spin_unlock_irqrestore(&chan->lock, flags);

		/* Free memory that is allocated for BD */
		dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
				  XILINX_DMA_NUM_DESCS, chan->seg_v,
				  chan->seg_p);

		/* Free Memory that is allocated for cyclic DMA Mode */
		dma_free_coherent(chan->dev, sizeof(*chan->cyclic_seg_v),
				  chan->cyclic_seg_v, chan->cyclic_seg_p);
	}

	if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) {
		dma_pool_destroy(chan->desc_pool);
		chan->desc_pool = NULL;
	}
}

/**
 * xilinx_dma_get_residue - Compute residue for a given descriptor
 * @chan: Driver specific dma channel
 * @desc: dma transaction descriptor
 *
 * Return: The number of residue bytes for the descriptor.
 */
static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan,
				  struct xilinx_dma_tx_descriptor *desc)
{
	struct xilinx_cdma_tx_segment *cdma_seg;
	struct xilinx_axidma_tx_segment *axidma_seg;
	struct xilinx_cdma_desc_hw *cdma_hw;
	struct xilinx_axidma_desc_hw *axidma_hw;
	struct list_head *entry;
	u32 residue = 0;

	list_for_each(entry, &desc->segments) {
		if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
			cdma_seg = list_entry(entry,
					      struct xilinx_cdma_tx_segment,
					      node);
			cdma_hw = &cdma_seg->hw;
			residue += (cdma_hw->control - cdma_hw->status) &
				   chan->xdev->max_buffer_len;
		} else {
			axidma_seg = list_entry(entry,
						struct xilinx_axidma_tx_segment,
						node);
			axidma_hw = &axidma_seg->hw;
			residue += (axidma_hw->control - axidma_hw->status) &
				   chan->xdev->max_buffer_len;
		}
	}

	return residue;
}
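
/*
 * Illustrative example: for AXI DMA the low bits of hw->control hold the
 * programmed buffer length and the low bits of hw->status hold the bytes
 * actually transferred, so a segment programmed for 4096 bytes that moved
 * 1024 bytes contributes (4096 - 1024) & max_buffer_len = 3072 bytes of
 * residue; the per-segment values are summed over the whole descriptor.
 */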

/**
 * xilinx_dma_chan_handle_cyclic - Cyclic dma callback
 * @chan: Driver specific dma channel
 * @desc: dma transaction descriptor
 * @flags: flags for spin lock
 */
static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
					  struct xilinx_dma_tx_descriptor *desc,
					  unsigned long *flags)
{
	dma_async_tx_callback callback;
	void *callback_param;

	callback = desc->async_tx.callback;
	callback_param = desc->async_tx.callback_param;
	if (callback) {
		spin_unlock_irqrestore(&chan->lock, *flags);
		callback(callback_param);
		spin_lock_irqsave(&chan->lock, *flags);
	}
}

/**
 * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc, *next;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	list_for_each_entry_safe(desc, next, &chan->done_list, node) {
		struct dmaengine_result result;

		if (desc->cyclic) {
			xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
			break;
		}

		/* Remove from the list of running transactions */
		list_del(&desc->node);

		if (unlikely(desc->err)) {
			if (chan->direction == DMA_DEV_TO_MEM)
				result.result = DMA_TRANS_READ_FAILED;
			else
				result.result = DMA_TRANS_WRITE_FAILED;
		} else {
			result.result = DMA_TRANS_NOERROR;
		}

		result.residue = desc->residue;

		/* Run the link descriptor callback function */
		spin_unlock_irqrestore(&chan->lock, flags);
		dmaengine_desc_get_callback_invoke(&desc->async_tx, &result);
		spin_lock_irqsave(&chan->lock, flags);

		/* Run any dependencies, then free the descriptor */
		dma_run_dependencies(&desc->async_tx);
		xilinx_dma_free_tx_descriptor(chan, desc);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_dma_do_tasklet - Schedule completion tasklet
 * @data: Pointer to the Xilinx DMA channel structure
 */
static void xilinx_dma_do_tasklet(unsigned long data)
{
	struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data;

	xilinx_dma_chan_desc_cleanup(chan);
}

/**
 * xilinx_dma_alloc_chan_resources - Allocate channel resources
 * @dchan: DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	int i;

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 0;

	/*
	 * We need the descriptor to be aligned to 64 bytes
	 * to meet the Xilinx VDMA specification requirement.
	 */
	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		/* Allocate the buffer descriptors. */
		chan->seg_v = dma_alloc_coherent(chan->dev,
						 sizeof(*chan->seg_v) * XILINX_DMA_NUM_DESCS,
						 &chan->seg_p, GFP_KERNEL);
		if (!chan->seg_v) {
			dev_err(chan->dev,
				"unable to allocate channel %d descriptors\n",
				chan->id);
			return -ENOMEM;
		}
		/*
		 * For cyclic DMA mode we need to program the tail Descriptor
		 * register with a value which is not a part of the BD chain
		 * so allocating a desc segment during channel allocation for
		 * programming tail descriptor.
		 */
		chan->cyclic_seg_v = dma_alloc_coherent(chan->dev,
							sizeof(*chan->cyclic_seg_v),
							&chan->cyclic_seg_p,
							GFP_KERNEL);
		if (!chan->cyclic_seg_v) {
			dev_err(chan->dev,
				"unable to allocate desc segment for cyclic DMA\n");
			dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
					  XILINX_DMA_NUM_DESCS, chan->seg_v,
					  chan->seg_p);
			return -ENOMEM;
		}
		chan->cyclic_seg_v->phys = chan->cyclic_seg_p;

		for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
			chan->seg_v[i].hw.next_desc =
			lower_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
				((i + 1) % XILINX_DMA_NUM_DESCS));
			chan->seg_v[i].hw.next_desc_msb =
			upper_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
				((i + 1) % XILINX_DMA_NUM_DESCS));
			chan->seg_v[i].phys = chan->seg_p +
				sizeof(*chan->seg_v) * i;
			list_add_tail(&chan->seg_v[i].node,
				      &chan->free_seg_list);
		}
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
						  chan->dev,
						  sizeof(struct xilinx_cdma_tx_segment),
						  __alignof__(struct xilinx_cdma_tx_segment),
						  0);
	} else {
		chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
						  chan->dev,
						  sizeof(struct xilinx_vdma_tx_segment),
						  __alignof__(struct xilinx_vdma_tx_segment),
						  0);
	}

	if (!chan->desc_pool &&
	    (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA)) {
		dev_err(chan->dev,
			"unable to allocate channel %d descriptor pool\n",
			chan->id);
		return -ENOMEM;
	}

	dma_cookie_init(dchan);

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		/* For AXI DMA, resetting one channel will reset the
		 * other channel as well, so enable the interrupts here.
		 */
		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
			     XILINX_DMA_DMAXR_ALL_IRQ_MASK);
	}

	if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
			     XILINX_CDMA_CR_SGMODE);

	return 0;
}

/**
 * xilinx_dma_calc_copysize - Calculate the amount of data to copy
 * @chan: Driver specific DMA channel
 * @size: Total data that needs to be copied
 * @done: Amount of data that has been already copied
 *
 * Return: Amount of data that has to be copied
 */
static int xilinx_dma_calc_copysize(struct xilinx_dma_chan *chan,
				    int size, int done)
{
	size_t copy;

	copy = min_t(size_t, size - done,
		     chan->xdev->max_buffer_len);

	if ((copy + done < size) &&
	    chan->xdev->common.copy_align) {
		/*
		 * If this is not the last descriptor, make sure
		 * the next one will be properly aligned
		 */
		copy = rounddown(copy,
				 (1 << chan->xdev->common.copy_align));
	}
	return copy;
}
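
/*
 * Worked example (illustrative values): copying size = 100000 bytes with
 * done = 0, max_buffer_len = 65535 and copy_align = 3 (8-byte alignment)
 * gives copy = min(100000, 65535) = 65535; since more data remains after
 * this descriptor, it is rounded down to rounddown(65535, 1 << 3) = 65528
 * so the next chunk starts on an aligned boundary.
 */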
1023
1024/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301025 * xilinx_dma_tx_status - Get DMA transaction status
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301026 * @dchan: DMA channel
1027 * @cookie: Transaction identifier
1028 * @txstate: Transaction state
1029 *
1030 * Return: DMA transaction status
1031 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301032static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301033 dma_cookie_t cookie,
1034 struct dma_tx_state *txstate)
1035{
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301036 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1037 struct xilinx_dma_tx_descriptor *desc;
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301038 enum dma_status ret;
1039 unsigned long flags;
1040 u32 residue = 0;
1041
1042 ret = dma_cookie_status(dchan, cookie, txstate);
1043 if (ret == DMA_COMPLETE || !txstate)
1044 return ret;
1045
Nicholas Graumanna575d0b2019-10-15 20:18:21 +05301046 spin_lock_irqsave(&chan->lock, flags);
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301047
Nicholas Graumanna575d0b2019-10-15 20:18:21 +05301048 desc = list_last_entry(&chan->active_list,
1049 struct xilinx_dma_tx_descriptor, node);
1050 /*
1051 * VDMA and simple mode do not support residue reporting, so the
1052 * residue field will always be 0.
1053 */
1054 if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA)
1055 residue = xilinx_dma_get_residue(chan, desc);
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301056
Nicholas Graumanna575d0b2019-10-15 20:18:21 +05301057 spin_unlock_irqrestore(&chan->lock, flags);
1058
1059 dma_set_residue(txstate, residue);
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301060
1061 return ret;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301062}
1063
1064/**
Akinobu Mita676f9c22017-03-14 00:59:11 +09001065 * xilinx_dma_stop_transfer - Halt DMA channel
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301066 * @chan: Driver specific DMA channel
Kedareswara rao Appanae50a0ad2017-12-07 10:51:05 +05301067 *
1068 * Return: '0' on success and failure value on error
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301069 */
Akinobu Mita676f9c22017-03-14 00:59:11 +09001070static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301071{
Kedareswara rao Appana9495f262016-02-26 19:33:54 +05301072 u32 val;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301073
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301074 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301075
1076 /* Wait for the hardware to halt */
Akinobu Mita676f9c22017-03-14 00:59:11 +09001077 return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1078 val & XILINX_DMA_DMASR_HALTED, 0,
1079 XILINX_DMA_LOOP_COUNT);
1080}
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301081
Akinobu Mita676f9c22017-03-14 00:59:11 +09001082/**
1083 * xilinx_cdma_stop_transfer - Wait for the current transfer to complete
1084 * @chan: Driver specific DMA channel
Kedareswara rao Appanae50a0ad2017-12-07 10:51:05 +05301085 *
1086 * Return: '0' on success and failure value on error
Akinobu Mita676f9c22017-03-14 00:59:11 +09001087 */
1088static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan)
1089{
1090 u32 val;
1091
1092 return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1093 val & XILINX_DMA_DMASR_IDLE, 0,
1094 XILINX_DMA_LOOP_COUNT);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301095}
1096
1097/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301098 * xilinx_dma_start - Start DMA channel
1099 * @chan: Driver specific DMA channel
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301100 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301101static void xilinx_dma_start(struct xilinx_dma_chan *chan)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301102{
Kedareswara rao Appana69490632016-03-03 23:02:42 +05301103 int err;
Kedareswara rao Appana9495f262016-02-26 19:33:54 +05301104 u32 val;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301105
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301106 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301107
1108 /* Wait for the hardware to start */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301109 err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1110 !(val & XILINX_DMA_DMASR_HALTED), 0,
1111 XILINX_DMA_LOOP_COUNT);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301112
Kedareswara rao Appana9495f262016-02-26 19:33:54 +05301113 if (err) {
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301114 dev_err(chan->dev, "Cannot start channel %p: %x\n",
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301115 chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301116
1117 chan->err = true;
1118 }
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301119}
1120
1121/**
1122 * xilinx_vdma_start_transfer - Starts VDMA transfer
1123 * @chan: Driver specific channel struct pointer
1124 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301125static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301126{
1127 struct xilinx_vdma_config *config = &chan->config;
Vinod Koulf935d7d2019-05-21 19:36:44 +05301128 struct xilinx_dma_tx_descriptor *desc;
Kedareswara rao Appanafe0503e2017-12-07 10:51:03 +05301129 u32 reg, j;
Andrea Merellob8349172018-11-20 16:31:51 +01001130 struct xilinx_vdma_tx_segment *segment, *last = NULL;
1131 int i = 0;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301132
Kedareswara rao Appana26c5e362016-02-26 19:33:52 +05301133 /* This function was invoked with lock held */
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301134 if (chan->err)
1135 return;
1136
Kedareswara rao Appana21e02a32017-12-07 10:51:02 +05301137 if (!chan->idle)
1138 return;
1139
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301140 if (list_empty(&chan->pending_list))
Kedareswara rao Appana26c5e362016-02-26 19:33:52 +05301141 return;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301142
1143 desc = list_first_entry(&chan->pending_list,
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301144 struct xilinx_dma_tx_descriptor, node);
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05301145
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301146 /* Configure the hardware using info in the config structure */
Radhey Shyam Pandey0894aa22018-06-13 13:04:48 +05301147 if (chan->has_vflip) {
1148 reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP);
1149 reg &= ~XILINX_VDMA_ENABLE_VERTICAL_FLIP;
1150 reg |= config->vflip_en;
1151 dma_write(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP,
1152 reg);
1153 }
1154
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301155 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301156
1157 if (config->frm_cnt_en)
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301158 reg |= XILINX_DMA_DMACR_FRAMECNT_EN;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301159 else
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301160 reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301161
Andrea Merellob8349172018-11-20 16:31:51 +01001162 /* If not parking, enable circular mode */
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301163 if (config->park)
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301164 reg &= ~XILINX_DMA_DMACR_CIRC_EN;
Andrea Merellob8349172018-11-20 16:31:51 +01001165 else
1166 reg |= XILINX_DMA_DMACR_CIRC_EN;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301167
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301168 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301169
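	/*
	 * Select the frame store this descriptor will use: the same
	 * desc_submitcount index programs the park pointer here and the
	 * per-frame start address register written further below.
	 */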
Kedareswara rao Appanafe0503e2017-12-07 10:51:03 +05301170 j = chan->desc_submitcount;
1171 reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
1172 if (chan->direction == DMA_MEM_TO_DEV) {
1173 reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
1174 reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
1175 } else {
1176 reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
1177 reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301178 }
Kedareswara rao Appanafe0503e2017-12-07 10:51:03 +05301179 dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301180
1181 /* Start the hardware */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301182 xilinx_dma_start(chan);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301183
1184 if (chan->err)
Kedareswara rao Appana26c5e362016-02-26 19:33:52 +05301185 return;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301186
1187 /* Start the transfer */
Andrea Merellob8349172018-11-20 16:31:51 +01001188 if (chan->desc_submitcount < chan->num_frms)
1189 i = chan->desc_submitcount;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301190
Andrea Merellob8349172018-11-20 16:31:51 +01001191 list_for_each_entry(segment, &desc->segments, node) {
1192 if (chan->ext_addr)
1193 vdma_desc_write_64(chan,
1194 XILINX_VDMA_REG_START_ADDRESS_64(i++),
1195 segment->hw.buf_addr,
1196 segment->hw.buf_addr_msb);
1197 else
1198 vdma_desc_write(chan,
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301199 XILINX_VDMA_REG_START_ADDRESS(i++),
1200 segment->hw.buf_addr);
Kedareswara rao Appanab72db402016-04-06 10:38:08 +05301201
Andrea Merellob8349172018-11-20 16:31:51 +01001202 last = segment;
Kedareswara rao Appanaa65cf5122016-04-06 10:38:09 +05301203 }
Kedareswara rao Appana21e02a32017-12-07 10:51:02 +05301204
Andrea Merellob8349172018-11-20 16:31:51 +01001205 if (!last)
1206 return;
1207
1208	/* HW expects these parameters to be the same for one transaction */
1209 vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
1210 vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
1211 last->hw.stride);
1212 vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
1213
1214 chan->desc_submitcount++;
1215 chan->desc_pendingcount--;
1216 list_del(&desc->node);
1217 list_add_tail(&desc->node, &chan->active_list);
1218 if (chan->desc_submitcount == chan->num_frms)
1219 chan->desc_submitcount = 0;
1220
Kedareswara rao Appana21e02a32017-12-07 10:51:02 +05301221 chan->idle = false;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301222}
1223
1224/**
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05301225 * xilinx_cdma_start_transfer - Starts CDMA transfer
1226 * @chan: Driver specific channel struct pointer
1227 */
1228static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
1229{
1230 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1231 struct xilinx_cdma_tx_segment *tail_segment;
1232 u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR);
1233
1234 if (chan->err)
1235 return;
1236
Kedareswara rao Appana21e02a32017-12-07 10:51:02 +05301237 if (!chan->idle)
1238 return;
1239
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05301240 if (list_empty(&chan->pending_list))
1241 return;
1242
1243 head_desc = list_first_entry(&chan->pending_list,
1244 struct xilinx_dma_tx_descriptor, node);
1245 tail_desc = list_last_entry(&chan->pending_list,
1246 struct xilinx_dma_tx_descriptor, node);
1247 tail_segment = list_last_entry(&tail_desc->segments,
1248 struct xilinx_cdma_tx_segment, node);
1249
1250 if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
1251 ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX;
1252 ctrl_reg |= chan->desc_pendingcount <<
1253 XILINX_DMA_CR_COALESCE_SHIFT;
1254 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg);
1255 }
1256
1257 if (chan->has_sg) {
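		/*
		 * Toggle the SGMODE bit before reprogramming the descriptor
		 * pointers so the engine is switched (back) into
		 * scatter-gather operation for this submission.
		 */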
Kedareswara rao Appana48c62fb2018-01-03 12:12:09 +05301258 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
1259 XILINX_CDMA_CR_SGMODE);
1260
1261 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1262 XILINX_CDMA_CR_SGMODE);
1263
Kedareswara rao Appana9791e712016-06-07 19:21:16 +05301264 xilinx_write(chan, XILINX_DMA_REG_CURDESC,
1265 head_desc->async_tx.phys);
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05301266
1267 /* Update tail ptr register which will start the transfer */
Kedareswara rao Appana9791e712016-06-07 19:21:16 +05301268 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1269 tail_segment->phys);
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05301270 } else {
1271 /* In simple mode */
1272 struct xilinx_cdma_tx_segment *segment;
1273 struct xilinx_cdma_desc_hw *hw;
1274
1275 segment = list_first_entry(&head_desc->segments,
1276 struct xilinx_cdma_tx_segment,
1277 node);
1278
1279 hw = &segment->hw;
1280
Radhey Shyam Pandey0e03aca2018-09-29 11:18:00 -06001281 xilinx_write(chan, XILINX_CDMA_REG_SRCADDR,
1282 xilinx_prep_dma_addr_t(hw->src_addr));
1283 xilinx_write(chan, XILINX_CDMA_REG_DSTADDR,
1284 xilinx_prep_dma_addr_t(hw->dest_addr));
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05301285
1286 /* Start the transfer */
1287 dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
Andrea Merello616f0f82018-11-20 16:31:45 +01001288 hw->control & chan->xdev->max_buffer_len);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301289 }
1290
1291 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1292 chan->desc_pendingcount = 0;
Kedareswara rao Appana21e02a32017-12-07 10:51:02 +05301293 chan->idle = false;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301294}
1295
1296/**
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301297 * xilinx_dma_start_transfer - Starts DMA transfer
1298 * @chan: Driver specific channel struct pointer
1299 */
1300static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
1301{
1302 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
Kedareswara rao Appana23059402017-12-07 10:51:04 +05301303 struct xilinx_axidma_tx_segment *tail_segment;
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301304 u32 reg;
1305
1306 if (chan->err)
1307 return;
1308
1309 if (list_empty(&chan->pending_list))
1310 return;
1311
Kedareswara rao Appana21e02a32017-12-07 10:51:02 +05301312 if (!chan->idle)
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301313 return;
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301314
1315 head_desc = list_first_entry(&chan->pending_list,
1316 struct xilinx_dma_tx_descriptor, node);
1317 tail_desc = list_last_entry(&chan->pending_list,
1318 struct xilinx_dma_tx_descriptor, node);
1319 tail_segment = list_last_entry(&tail_desc->segments,
1320 struct xilinx_axidma_tx_segment, node);
1321
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301322 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1323
1324 if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
1325 reg &= ~XILINX_DMA_CR_COALESCE_MAX;
1326 reg |= chan->desc_pendingcount <<
1327 XILINX_DMA_CR_COALESCE_SHIFT;
1328 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1329 }
1330
Radhey Shyam Pandeybcb2dc72019-10-22 22:30:20 +05301331 if (chan->has_sg)
Kedareswara rao Appanaf0cba682016-06-07 19:21:15 +05301332 xilinx_write(chan, XILINX_DMA_REG_CURDESC,
1333 head_desc->async_tx.phys);
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301334
1335 xilinx_dma_start(chan);
1336
1337 if (chan->err)
1338 return;
1339
1340 /* Start the transfer */
Radhey Shyam Pandeybcb2dc72019-10-22 22:30:20 +05301341 if (chan->has_sg) {
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05301342 if (chan->cyclic)
Kedareswara rao Appanaf0cba682016-06-07 19:21:15 +05301343 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1344 chan->cyclic_seg_v->phys);
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05301345 else
Kedareswara rao Appanaf0cba682016-06-07 19:21:15 +05301346 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1347 tail_segment->phys);
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301348 } else {
1349 struct xilinx_axidma_tx_segment *segment;
1350 struct xilinx_axidma_desc_hw *hw;
1351
1352 segment = list_first_entry(&head_desc->segments,
1353 struct xilinx_axidma_tx_segment,
1354 node);
1355 hw = &segment->hw;
1356
Kedareswara rao Appanaf0cba682016-06-07 19:21:15 +05301357 xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr);
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301358
1359 /* Start the transfer */
1360 dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
Andrea Merello616f0f82018-11-20 16:31:45 +01001361 hw->control & chan->xdev->max_buffer_len);
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301362 }
1363
1364 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1365 chan->desc_pendingcount = 0;
Kedareswara rao Appana21e02a32017-12-07 10:51:02 +05301366 chan->idle = false;
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301367}
1368
1369/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301370 * xilinx_dma_issue_pending - Issue pending transactions
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301371 * @dchan: DMA channel
1372 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301373static void xilinx_dma_issue_pending(struct dma_chan *dchan)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301374{
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301375 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
Kedareswara rao Appana26c5e362016-02-26 19:33:52 +05301376 unsigned long flags;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301377
Kedareswara rao Appana26c5e362016-02-26 19:33:52 +05301378 spin_lock_irqsave(&chan->lock, flags);
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301379 chan->start_transfer(chan);
Kedareswara rao Appana26c5e362016-02-26 19:33:52 +05301380 spin_unlock_irqrestore(&chan->lock, flags);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301381}
1382
1383/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301384 * xilinx_dma_complete_descriptor - Mark the active descriptor as complete
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301385 * @chan : xilinx DMA channel
1386 *
1387 * CONTEXT: hardirq
1388 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301389static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301390{
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301391 struct xilinx_dma_tx_descriptor *desc, *next;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301392
Kedareswara rao Appana26c5e362016-02-26 19:33:52 +05301393	/* This function is called with the channel lock held */
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05301394 if (list_empty(&chan->active_list))
Kedareswara rao Appana26c5e362016-02-26 19:33:52 +05301395 return;
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05301396
1397 list_for_each_entry_safe(desc, next, &chan->active_list, node) {
Nicholas Graumannd8bae212019-10-15 20:18:22 +05301398 if (chan->has_sg && chan->xdev->dma_config->dmatype !=
1399 XDMA_TYPE_VDMA)
1400 desc->residue = xilinx_dma_get_residue(chan, desc);
1401 else
1402 desc->residue = 0;
1403 desc->err = chan->err;
1404
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05301405 list_del(&desc->node);
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05301406 if (!desc->cyclic)
1407 dma_cookie_complete(&desc->async_tx);
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05301408 list_add_tail(&desc->node, &chan->done_list);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301409 }
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301410}
1411
1412/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301413 * xilinx_dma_reset - Reset DMA channel
1414 * @chan: Driver specific DMA channel
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301415 *
1416 * Return: '0' on success and failure value on error
1417 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301418static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301419{
Kedareswara rao Appana69490632016-03-03 23:02:42 +05301420 int err;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301421 u32 tmp;
1422
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301423 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301424
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301425 /* Wait for the hardware to finish reset */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301426 err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp,
1427 !(tmp & XILINX_DMA_DMACR_RESET), 0,
1428 XILINX_DMA_LOOP_COUNT);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301429
Kedareswara rao Appana9495f262016-02-26 19:33:54 +05301430 if (err) {
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301431 dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301432 dma_ctrl_read(chan, XILINX_DMA_REG_DMACR),
1433 dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301434 return -ETIMEDOUT;
1435 }
1436
1437 chan->err = false;
Kedareswara rao Appana21e02a32017-12-07 10:51:02 +05301438 chan->idle = true;
Nicholas Graumann8a631a52019-10-15 20:18:24 +05301439 chan->desc_pendingcount = 0;
Kedareswara rao Appanafe0503e2017-12-07 10:51:03 +05301440 chan->desc_submitcount = 0;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301441
Kedareswara rao Appana9495f262016-02-26 19:33:54 +05301442 return err;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301443}
1444
1445/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301446 * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts
1447 * @chan: Driver specific DMA channel
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301448 *
1449 * Return: '0' on success and failure value on error
1450 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301451static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301452{
1453 int err;
1454
1455	/* Reset the channel */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301456 err = xilinx_dma_reset(chan);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301457 if (err)
1458 return err;
1459
1460 /* Enable interrupts */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301461 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1462 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301463
1464 return 0;
1465}
1466
1467/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301468 * xilinx_dma_irq_handler - DMA Interrupt handler
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301469 * @irq: IRQ number
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301470 * @data: Pointer to the Xilinx DMA channel structure
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301471 *
1472 * Return: IRQ_HANDLED/IRQ_NONE
1473 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301474static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301475{
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301476 struct xilinx_dma_chan *chan = data;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301477 u32 status;
1478
1479 /* Read the status and ack the interrupts. */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301480 status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR);
1481 if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK))
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301482 return IRQ_NONE;
1483
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301484 dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
1485 status & XILINX_DMA_DMAXR_ALL_IRQ_MASK);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301486
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301487 if (status & XILINX_DMA_DMASR_ERR_IRQ) {
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301488 /*
1489 * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
1490 * error is recoverable, ignore it. Otherwise flag the error.
1491 *
1492 * Only recoverable errors can be cleared in the DMASR register,
1493		 * so make sure not to set any other error bits to 1.
1494 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301495 u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK;
Kedareswara rao Appana48a59ed2016-04-06 10:44:55 +05301496
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301497 dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
1498 errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301499
1500 if (!chan->flush_on_fsync ||
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301501 (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) {
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301502 dev_err(chan->dev,
1503 "Channel %p has errors %x, cdr %x tdr %x\n",
1504 chan, errors,
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301505 dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC),
1506 dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC));
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301507 chan->err = true;
1508 }
1509 }
1510
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301511 if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301512 /*
1513		 * The device is taking too long between transfers for the
1514		 * responsiveness the user requested.
1515 */
1516 dev_dbg(chan->dev, "Inter-packet latency too long\n");
1517 }
1518
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301519 if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
Kedareswara rao Appana26c5e362016-02-26 19:33:52 +05301520 spin_lock(&chan->lock);
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301521 xilinx_dma_complete_descriptor(chan);
Kedareswara rao Appana21e02a32017-12-07 10:51:02 +05301522 chan->idle = true;
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301523 chan->start_transfer(chan);
Kedareswara rao Appana26c5e362016-02-26 19:33:52 +05301524 spin_unlock(&chan->lock);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301525 }
1526
1527 tasklet_schedule(&chan->tasklet);
1528 return IRQ_HANDLED;
1529}
1530
1531/**
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05301532 * append_desc_queue - Append a descriptor to the pending queue
1533 * @chan: Driver specific dma channel
1534 * @desc: dma transaction descriptor
1535 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301536static void append_desc_queue(struct xilinx_dma_chan *chan,
1537 struct xilinx_dma_tx_descriptor *desc)
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05301538{
1539 struct xilinx_vdma_tx_segment *tail_segment;
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301540 struct xilinx_dma_tx_descriptor *tail_desc;
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301541 struct xilinx_axidma_tx_segment *axidma_tail_segment;
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05301542 struct xilinx_cdma_tx_segment *cdma_tail_segment;
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05301543
1544 if (list_empty(&chan->pending_list))
1545 goto append;
1546
1547 /*
1548 * Add the hardware descriptor to the chain of hardware descriptors
1549 * that already exists in memory.
1550 */
1551 tail_desc = list_last_entry(&chan->pending_list,
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301552 struct xilinx_dma_tx_descriptor, node);
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05301553 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301554 tail_segment = list_last_entry(&tail_desc->segments,
1555 struct xilinx_vdma_tx_segment,
1556 node);
1557 tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05301558 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05301559 cdma_tail_segment = list_last_entry(&tail_desc->segments,
1560 struct xilinx_cdma_tx_segment,
1561 node);
1562 cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301563 } else {
1564 axidma_tail_segment = list_last_entry(&tail_desc->segments,
1565 struct xilinx_axidma_tx_segment,
1566 node);
1567 axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1568 }
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05301569
1570 /*
1571 * Add the software descriptor and all children to the list
1572 * of pending transactions
1573 */
1574append:
1575 list_add_tail(&desc->node, &chan->pending_list);
1576 chan->desc_pendingcount++;
1577
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05301578 if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA)
1579 && unlikely(chan->desc_pendingcount > chan->num_frms)) {
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05301580 dev_dbg(chan->dev, "desc pendingcount is too high\n");
1581 chan->desc_pendingcount = chan->num_frms;
1582 }
1583}
1584
1585/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301586 * xilinx_dma_tx_submit - Submit DMA transaction
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301587 * @tx: Async transaction descriptor
1588 *
1589 * Return: cookie value on success and failure value on error
1590 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301591static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301592{
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301593 struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
1594 struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301595 dma_cookie_t cookie;
1596 unsigned long flags;
1597 int err;
1598
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05301599 if (chan->cyclic) {
1600 xilinx_dma_free_tx_descriptor(chan, desc);
1601 return -EBUSY;
1602 }
1603
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301604 if (chan->err) {
1605 /*
1606 * If reset fails, need to hard reset the system.
1607 * Channel is no longer functional
1608 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301609 err = xilinx_dma_chan_reset(chan);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301610 if (err < 0)
1611 return err;
1612 }
1613
1614 spin_lock_irqsave(&chan->lock, flags);
1615
1616 cookie = dma_cookie_assign(tx);
1617
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05301618 /* Put this transaction onto the tail of the pending queue */
1619 append_desc_queue(chan, desc);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301620
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05301621 if (desc->cyclic)
1622 chan->cyclic = true;
1623
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301624 spin_unlock_irqrestore(&chan->lock, flags);
1625
1626 return cookie;
1627}
1628
1629/**
1630 * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a
1631 * DMA_SLAVE transaction
1632 * @dchan: DMA channel
1633 * @xt: Interleaved template pointer
1634 * @flags: transfer ack flags
1635 *
1636 * Return: Async transaction descriptor on success and NULL on failure
1637 */
1638static struct dma_async_tx_descriptor *
1639xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
1640 struct dma_interleaved_template *xt,
1641 unsigned long flags)
1642{
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301643 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1644 struct xilinx_dma_tx_descriptor *desc;
Kedareswara rao Appana4b597c62018-01-03 12:12:10 +05301645 struct xilinx_vdma_tx_segment *segment;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301646 struct xilinx_vdma_desc_hw *hw;
1647
1648 if (!is_slave_direction(xt->dir))
1649 return NULL;
1650
1651 if (!xt->numf || !xt->sgl[0].size)
1652 return NULL;
1653
Srikanth Thokalaa5e48e22014-11-05 20:37:01 +02001654 if (xt->frame_size != 1)
1655 return NULL;
1656
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301657 /* Allocate a transaction descriptor. */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301658 desc = xilinx_dma_alloc_tx_descriptor(chan);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301659 if (!desc)
1660 return NULL;
1661
1662 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301663 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301664 async_tx_ack(&desc->async_tx);
1665
1666 /* Allocate the link descriptor from DMA pool */
1667 segment = xilinx_vdma_alloc_tx_segment(chan);
1668 if (!segment)
1669 goto error;
1670
1671 /* Fill in the hardware descriptor */
1672 hw = &segment->hw;
1673 hw->vsize = xt->numf;
1674 hw->hsize = xt->sgl[0].size;
Srikanth Thokala6d80f452014-11-05 20:37:02 +02001675 hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301676 XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301677 hw->stride |= chan->config.frm_dly <<
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301678 XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301679
Kedareswara rao Appanab72db402016-04-06 10:38:08 +05301680 if (xt->dir != DMA_MEM_TO_DEV) {
1681 if (chan->ext_addr) {
1682 hw->buf_addr = lower_32_bits(xt->dst_start);
1683 hw->buf_addr_msb = upper_32_bits(xt->dst_start);
1684 } else {
1685 hw->buf_addr = xt->dst_start;
1686 }
1687 } else {
1688 if (chan->ext_addr) {
1689 hw->buf_addr = lower_32_bits(xt->src_start);
1690 hw->buf_addr_msb = upper_32_bits(xt->src_start);
1691 } else {
1692 hw->buf_addr = xt->src_start;
1693 }
1694 }
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301695
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301696 /* Insert the segment into the descriptor segments list. */
1697 list_add_tail(&segment->node, &desc->segments);
1698
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301699 /* Link the last hardware descriptor with the first. */
1700 segment = list_first_entry(&desc->segments,
1701 struct xilinx_vdma_tx_segment, node);
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05301702 desc->async_tx.phys = segment->phys;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301703
1704 return &desc->async_tx;
1705
1706error:
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301707 xilinx_dma_free_tx_descriptor(chan, desc);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301708 return NULL;
1709}
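
/*
 * A minimal, illustrative sketch of how a dmaengine client might queue one
 * video frame through the interleaved prep routine above.  The helper name,
 * its parameters and the chosen flags are assumptions for illustration; only
 * the generic dmaengine client calls (dmaengine_prep_interleaved_dma(),
 * dmaengine_submit(), dma_async_issue_pending()) are standard API.
 */
static int __maybe_unused xilinx_vdma_example_queue_frame(struct dma_chan *chan,
							   dma_addr_t frame_addr,
							   size_t hsize,
							   size_t vsize,
							   size_t stride)
{
	struct dma_interleaved_template *xt;
	struct dma_async_tx_descriptor *txd;
	dma_cookie_t cookie;

	/* Exactly one data chunk per frame, as required by the prep above */
	xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
	if (!xt)
		return -ENOMEM;

	xt->dir = DMA_MEM_TO_DEV;
	xt->src_start = frame_addr;
	xt->numf = vsize;			/* number of lines */
	xt->frame_size = 1;
	xt->sgl[0].size = hsize;		/* active bytes per line */
	xt->sgl[0].icg = stride - hsize;	/* gap up to the line stride */

	txd = dmaengine_prep_interleaved_dma(chan, xt,
					     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	kfree(xt);				/* template is copied by prep */
	if (!txd)
		return -ENOMEM;

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}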
1710
1711/**
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05301712 * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
1713 * @dchan: DMA channel
1714 * @dma_dst: destination address
1715 * @dma_src: source address
1716 * @len: transfer length
1717 * @flags: transfer ack flags
1718 *
1719 * Return: Async transaction descriptor on success and NULL on failure
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301720 */
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05301721static struct dma_async_tx_descriptor *
1722xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
1723 dma_addr_t dma_src, size_t len, unsigned long flags)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301724{
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05301725 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1726 struct xilinx_dma_tx_descriptor *desc;
Akinobu Mitadb6a3d02017-03-14 00:59:12 +09001727 struct xilinx_cdma_tx_segment *segment;
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05301728 struct xilinx_cdma_desc_hw *hw;
1729
Andrea Merello616f0f82018-11-20 16:31:45 +01001730 if (!len || len > chan->xdev->max_buffer_len)
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05301731 return NULL;
1732
1733 desc = xilinx_dma_alloc_tx_descriptor(chan);
1734 if (!desc)
1735 return NULL;
1736
1737 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1738 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1739
1740 /* Allocate the link descriptor from DMA pool */
1741 segment = xilinx_cdma_alloc_tx_segment(chan);
1742 if (!segment)
1743 goto error;
1744
1745 hw = &segment->hw;
1746 hw->control = len;
1747 hw->src_addr = dma_src;
1748 hw->dest_addr = dma_dst;
Kedareswara rao Appana9791e712016-06-07 19:21:16 +05301749 if (chan->ext_addr) {
1750 hw->src_addr_msb = upper_32_bits(dma_src);
1751 hw->dest_addr_msb = upper_32_bits(dma_dst);
1752 }
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05301753
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05301754 /* Insert the segment into the descriptor segments list. */
1755 list_add_tail(&segment->node, &desc->segments);
1756
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05301757 desc->async_tx.phys = segment->phys;
Akinobu Mitadb6a3d02017-03-14 00:59:12 +09001758 hw->next_desc = segment->phys;
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05301759
1760 return &desc->async_tx;
1761
1762error:
1763 xilinx_dma_free_tx_descriptor(chan, desc);
1764 return NULL;
1765}
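
/*
 * A minimal, illustrative sketch of a client memcpy submission against the
 * CDMA prep routine above.  The helper name and its parameters are
 * assumptions; both addresses are expected to be DMA-mapped already.
 */
static int __maybe_unused xilinx_cdma_example_copy(struct dma_chan *chan,
						   dma_addr_t dst, dma_addr_t src,
						   size_t len)
{
	struct dma_async_tx_descriptor *txd;
	dma_cookie_t cookie;

	txd = dmaengine_prep_dma_memcpy(chan, dst, src, len,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txd)
		return -ENOMEM;

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		return -EIO;

	/* Kick the engine; completion is signalled through the tasklet */
	dma_async_issue_pending(chan);
	return 0;
}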
1766
1767/**
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301768 * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
1769 * @dchan: DMA channel
1770 * @sgl: scatterlist to transfer to/from
1771 * @sg_len: number of entries in @sgl
1772 * @direction: DMA direction
1773 * @flags: transfer ack flags
1774 * @context: APP words of the descriptor
1775 *
1776 * Return: Async transaction descriptor on success and NULL on failure
1777 */
1778static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
1779 struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
1780 enum dma_transfer_direction direction, unsigned long flags,
1781 void *context)
1782{
1783 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1784 struct xilinx_dma_tx_descriptor *desc;
Kedareswara rao Appana23059402017-12-07 10:51:04 +05301785 struct xilinx_axidma_tx_segment *segment = NULL;
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301786 u32 *app_w = (u32 *)context;
1787 struct scatterlist *sg;
1788 size_t copy;
1789 size_t sg_used;
1790 unsigned int i;
1791
1792 if (!is_slave_direction(direction))
1793 return NULL;
1794
1795 /* Allocate a transaction descriptor. */
1796 desc = xilinx_dma_alloc_tx_descriptor(chan);
1797 if (!desc)
1798 return NULL;
1799
1800 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1801 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1802
1803 /* Build transactions using information in the scatter gather list */
1804 for_each_sg(sgl, sg, sg_len, i) {
1805 sg_used = 0;
1806
1807 /* Loop until the entire scatterlist entry is used */
1808 while (sg_used < sg_dma_len(sg)) {
1809 struct xilinx_axidma_desc_hw *hw;
1810
1811 /* Get a free segment */
1812 segment = xilinx_axidma_alloc_tx_segment(chan);
1813 if (!segment)
1814 goto error;
1815
1816 /*
1817 * Calculate the maximum number of bytes to transfer,
1818 * making sure it is less than the hw limit
1819 */
Andrea Merello616f0f82018-11-20 16:31:45 +01001820 copy = xilinx_dma_calc_copysize(chan, sg_dma_len(sg),
1821 sg_used);
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301822 hw = &segment->hw;
1823
1824 /* Fill in the descriptor */
Kedareswara rao Appanaf0cba682016-06-07 19:21:15 +05301825 xilinx_axidma_buf(chan, hw, sg_dma_address(sg),
1826 sg_used, 0);
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301827
1828 hw->control = copy;
1829
1830 if (chan->direction == DMA_MEM_TO_DEV) {
1831 if (app_w)
1832 memcpy(hw->app, app_w, sizeof(u32) *
1833 XILINX_DMA_NUM_APP_WORDS);
1834 }
1835
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301836 sg_used += copy;
1837
1838 /*
1839 * Insert the segment into the descriptor segments
1840 * list.
1841 */
1842 list_add_tail(&segment->node, &desc->segments);
1843 }
1844 }
1845
1846 segment = list_first_entry(&desc->segments,
1847 struct xilinx_axidma_tx_segment, node);
1848 desc->async_tx.phys = segment->phys;
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301849
1850 /* For the last DMA_MEM_TO_DEV transfer, set EOP */
1851 if (chan->direction == DMA_MEM_TO_DEV) {
1852 segment->hw.control |= XILINX_DMA_BD_SOP;
1853 segment = list_last_entry(&desc->segments,
1854 struct xilinx_axidma_tx_segment,
1855 node);
1856 segment->hw.control |= XILINX_DMA_BD_EOP;
1857 }
1858
1859 return &desc->async_tx;
1860
1861error:
1862 xilinx_dma_free_tx_descriptor(chan, desc);
1863 return NULL;
1864}
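
/*
 * A minimal, illustrative sketch of a slave scatter-gather submission using
 * the prep routine above.  The helper name, callback and parameters are
 * assumptions; the scatterlist must already be DMA-mapped by the caller.
 */
static int __maybe_unused xilinx_dma_example_slave_sg(struct dma_chan *chan,
						      struct scatterlist *sgl,
						      unsigned int nents,
						      enum dma_transfer_direction dir,
						      dma_async_tx_callback done,
						      void *done_arg)
{
	struct dma_async_tx_descriptor *txd;
	dma_cookie_t cookie;

	txd = dmaengine_prep_slave_sg(chan, sgl, nents, dir,
				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txd)
		return -ENOMEM;

	/* Completion callback runs from the driver's tasklet */
	txd->callback = done;
	txd->callback_param = done_arg;

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}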
1865
1866/**
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05301867 * xilinx_dma_prep_dma_cyclic - prepare descriptors for a cyclic DMA transaction
Kedareswara rao Appanae50a0ad2017-12-07 10:51:05 +05301868 * @dchan: DMA channel
1869 * @buf_addr: Physical address of the buffer
1870 * @buf_len: Total length of the cyclic buffers
1871 * @period_len: length of individual cyclic buffer
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05301872 * @direction: DMA direction
1873 * @flags: transfer ack flags
Kedareswara rao Appanae50a0ad2017-12-07 10:51:05 +05301874 *
1875 * Return: Async transaction descriptor on success and NULL on failure
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05301876 */
1877static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
1878 struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
1879 size_t period_len, enum dma_transfer_direction direction,
1880 unsigned long flags)
1881{
1882 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1883 struct xilinx_dma_tx_descriptor *desc;
1884 struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
1885 size_t copy, sg_used;
1886 unsigned int num_periods;
1887 int i;
1888 u32 reg;
1889
Arnd Bergmannf67c3bd2016-06-13 17:07:33 +02001890 if (!period_len)
1891 return NULL;
1892
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05301893 num_periods = buf_len / period_len;
1894
Arnd Bergmannf67c3bd2016-06-13 17:07:33 +02001895 if (!num_periods)
1896 return NULL;
1897
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05301898 if (!is_slave_direction(direction))
1899 return NULL;
1900
1901 /* Allocate a transaction descriptor. */
1902 desc = xilinx_dma_alloc_tx_descriptor(chan);
1903 if (!desc)
1904 return NULL;
1905
1906 chan->direction = direction;
1907 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1908 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1909
1910 for (i = 0; i < num_periods; ++i) {
1911 sg_used = 0;
1912
1913 while (sg_used < period_len) {
1914 struct xilinx_axidma_desc_hw *hw;
1915
1916 /* Get a free segment */
1917 segment = xilinx_axidma_alloc_tx_segment(chan);
1918 if (!segment)
1919 goto error;
1920
1921 /*
1922 * Calculate the maximum number of bytes to transfer,
1923 * making sure it is less than the hw limit
1924 */
Andrea Merello616f0f82018-11-20 16:31:45 +01001925 copy = xilinx_dma_calc_copysize(chan, period_len,
1926 sg_used);
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05301927 hw = &segment->hw;
Kedareswara rao Appanaf0cba682016-06-07 19:21:15 +05301928 xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
1929 period_len * i);
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05301930 hw->control = copy;
1931
1932 if (prev)
1933 prev->hw.next_desc = segment->phys;
1934
1935 prev = segment;
1936 sg_used += copy;
1937
1938 /*
1939 * Insert the segment into the descriptor segments
1940 * list.
1941 */
1942 list_add_tail(&segment->node, &desc->segments);
1943 }
1944 }
1945
1946 head_segment = list_first_entry(&desc->segments,
1947 struct xilinx_axidma_tx_segment, node);
1948 desc->async_tx.phys = head_segment->phys;
1949
1950 desc->cyclic = true;
1951 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1952 reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
1953 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1954
Kedareswara rao Appanae598e6e2016-07-09 14:09:48 +05301955 segment = list_last_entry(&desc->segments,
1956 struct xilinx_axidma_tx_segment,
1957 node);
1958 segment->hw.next_desc = (u32) head_segment->phys;
1959
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05301960 /* For the last DMA_MEM_TO_DEV transfer, set EOP */
1961 if (direction == DMA_MEM_TO_DEV) {
Kedareswara rao Appanae167a0b2016-06-09 11:32:12 +05301962 head_segment->hw.control |= XILINX_DMA_BD_SOP;
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05301963 segment->hw.control |= XILINX_DMA_BD_EOP;
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05301964 }
1965
1966 return &desc->async_tx;
1967
1968error:
1969 xilinx_dma_free_tx_descriptor(chan, desc);
1970 return NULL;
1971}
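
/*
 * A minimal, illustrative sketch of a cyclic (ring buffer) submission using
 * the prep routine above.  The helper name, callback and parameters are
 * assumptions; the buffer must already be DMA-mapped and split into equal
 * periods.  A cyclic transfer runs until dmaengine_terminate_async()/sync()
 * is called on the channel.
 */
static int __maybe_unused xilinx_dma_example_cyclic(struct dma_chan *chan,
						    dma_addr_t buf, size_t buf_len,
						    size_t period_len,
						    enum dma_transfer_direction dir,
						    dma_async_tx_callback period_done,
						    void *arg)
{
	struct dma_async_tx_descriptor *txd;

	txd = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len, dir,
					DMA_PREP_INTERRUPT);
	if (!txd)
		return -ENOMEM;

	/* Invoked from the tasklet at the end of every period */
	txd->callback = period_done;
	txd->callback_param = arg;

	if (dma_submit_error(dmaengine_submit(txd)))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}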
1972
1973/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301974 * xilinx_dma_terminate_all - Halt the channel and free descriptors
Kedareswara rao Appanae50a0ad2017-12-07 10:51:05 +05301975 * @dchan: Driver specific DMA Channel pointer
1976 *
1977 * Return: '0' always.
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301978 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301979static int xilinx_dma_terminate_all(struct dma_chan *dchan)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301980{
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301981 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05301982 u32 reg;
Akinobu Mita676f9c22017-03-14 00:59:11 +09001983 int err;
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05301984
1985 if (chan->cyclic)
1986 xilinx_dma_chan_reset(chan);
Maxime Ripardba714042014-11-17 14:42:38 +01001987
Akinobu Mita676f9c22017-03-14 00:59:11 +09001988 err = chan->stop_transfer(chan);
1989 if (err) {
1990 dev_err(chan->dev, "Cannot stop channel %p: %x\n",
1991 chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
1992 chan->err = true;
1993 }
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301994
1995 /* Remove and free all of the descriptors in the lists */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301996 xilinx_dma_free_descriptors(chan);
Kedareswara rao Appana21e02a32017-12-07 10:51:02 +05301997 chan->idle = true;
Maxime Ripardba714042014-11-17 14:42:38 +01001998
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05301999 if (chan->cyclic) {
2000 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2001 reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
2002 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
2003 chan->cyclic = false;
2004 }
2005
Kedareswara rao Appana48c62fb2018-01-03 12:12:09 +05302006 if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
2007 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
2008 XILINX_CDMA_CR_SGMODE);
2009
Maxime Ripardba714042014-11-17 14:42:38 +01002010 return 0;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302011}
2012
2013/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302014 * xilinx_vdma_channel_set_config - Configure VDMA channel
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302015 * Run-time configuration for AXI VDMA, supports:
2016 * . halt the channel
2017 * . configure interrupt coalescing and inter-packet delay threshold
2018 * . start/stop parking
2019 * . enable genlock
2020 *
2021 * @dchan: DMA channel
2022 * @cfg: VDMA device configuration pointer
2023 *
2024 * Return: '0' on success and failure value on error
2025 */
2026int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
2027 struct xilinx_vdma_config *cfg)
2028{
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302029 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302030 u32 dmacr;
2031
2032 if (cfg->reset)
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302033 return xilinx_dma_chan_reset(chan);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302034
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302035 dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302036
2037 chan->config.frm_dly = cfg->frm_dly;
2038 chan->config.park = cfg->park;
2039
2040 /* genlock settings */
2041 chan->config.gen_lock = cfg->gen_lock;
2042 chan->config.master = cfg->master;
2043
2044 if (cfg->gen_lock && chan->genlock) {
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302045 dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
2046 dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302047 }
2048
2049 chan->config.frm_cnt_en = cfg->frm_cnt_en;
Radhey Shyam Pandey0894aa22018-06-13 13:04:48 +05302050 chan->config.vflip_en = cfg->vflip_en;
2051
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302052 if (cfg->park)
2053 chan->config.park_frm = cfg->park_frm;
2054 else
2055 chan->config.park_frm = -1;
2056
2057 chan->config.coalesc = cfg->coalesc;
2058 chan->config.delay = cfg->delay;
2059
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302060 if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
2061 dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302062 chan->config.coalesc = cfg->coalesc;
2063 }
2064
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302065 if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
2066 dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302067 chan->config.delay = cfg->delay;
2068 }
2069
2070 /* FSync Source selection */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302071 dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
2072 dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302073
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302074 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302075
2076 return 0;
2077}
2078EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
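
/*
 * A minimal, illustrative sketch of how a client might use the exported
 * helper above to reconfigure a VDMA channel before queueing frames.  The
 * helper name and the particular configuration values are assumptions;
 * struct xilinx_vdma_config comes from include/linux/dma/xilinx_dma.h.
 */
static int __maybe_unused xilinx_vdma_example_set_config(struct dma_chan *chan)
{
	struct xilinx_vdma_config cfg = {
		.frm_cnt_en = 1,	/* interrupt after 'coalesc' frames */
		.coalesc = 1,
		.frm_dly = 0,
		.gen_lock = 0,
		.park = 0,		/* circular mode, cycle through frames */
	};

	return xilinx_vdma_channel_set_config(chan, &cfg);
}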
2079
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302080/* -----------------------------------------------------------------------------
2081 * Probe and remove
2082 */
2083
2084/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302085 * xilinx_dma_chan_remove - Per Channel remove function
2086 * @chan: Driver specific DMA channel
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302087 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302088static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302089{
2090 /* Disable all interrupts */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302091 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
2092 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302093
2094 if (chan->irq > 0)
2095 free_irq(chan->irq, chan);
2096
2097 tasklet_kill(&chan->tasklet);
2098
2099 list_del(&chan->common.device_node);
2100}
2101
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302102static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2103 struct clk **tx_clk, struct clk **rx_clk,
2104 struct clk **sg_clk, struct clk **tmp_clk)
2105{
2106 int err;
2107
2108 *tmp_clk = NULL;
2109
2110 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2111 if (IS_ERR(*axi_clk)) {
2112 err = PTR_ERR(*axi_clk);
Radhey Shyam Pandey944879b2019-09-26 16:21:00 +05302113 if (err != -EPROBE_DEFER)
2114 dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n",
2115 err);
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302116 return err;
2117 }
2118
2119 *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
2120 if (IS_ERR(*tx_clk))
2121 *tx_clk = NULL;
2122
2123 *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
2124 if (IS_ERR(*rx_clk))
2125 *rx_clk = NULL;
2126
2127 *sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
2128 if (IS_ERR(*sg_clk))
2129 *sg_clk = NULL;
2130
2131 err = clk_prepare_enable(*axi_clk);
2132 if (err) {
Lars-Peter Clausen574897d2017-08-31 13:35:10 +02002133 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302134 return err;
2135 }
2136
2137 err = clk_prepare_enable(*tx_clk);
2138 if (err) {
Lars-Peter Clausen574897d2017-08-31 13:35:10 +02002139 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302140 goto err_disable_axiclk;
2141 }
2142
2143 err = clk_prepare_enable(*rx_clk);
2144 if (err) {
Lars-Peter Clausen574897d2017-08-31 13:35:10 +02002145 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302146 goto err_disable_txclk;
2147 }
2148
2149 err = clk_prepare_enable(*sg_clk);
2150 if (err) {
Lars-Peter Clausen574897d2017-08-31 13:35:10 +02002151 dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err);
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302152 goto err_disable_rxclk;
2153 }
2154
2155 return 0;
2156
2157err_disable_rxclk:
2158 clk_disable_unprepare(*rx_clk);
2159err_disable_txclk:
2160 clk_disable_unprepare(*tx_clk);
2161err_disable_axiclk:
2162 clk_disable_unprepare(*axi_clk);
2163
2164 return err;
2165}
2166
2167static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2168 struct clk **dev_clk, struct clk **tmp_clk,
2169 struct clk **tmp1_clk, struct clk **tmp2_clk)
2170{
2171 int err;
2172
2173 *tmp_clk = NULL;
2174 *tmp1_clk = NULL;
2175 *tmp2_clk = NULL;
2176
2177 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2178 if (IS_ERR(*axi_clk)) {
2179 err = PTR_ERR(*axi_clk);
Radhey Shyam Pandey944879b2019-09-26 16:21:00 +05302180 if (err != -EPROBE_DEFER)
2181 dev_err(&pdev->dev, "failed to get axi_clk (%d)\n",
2182 err);
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302183 return err;
2184 }
2185
2186 *dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
2187 if (IS_ERR(*dev_clk)) {
2188 err = PTR_ERR(*dev_clk);
Radhey Shyam Pandey944879b2019-09-26 16:21:00 +05302189 if (err != -EPROBE_DEFER)
2190 dev_err(&pdev->dev, "failed to get dev_clk (%d)\n",
2191 err);
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302192 return err;
2193 }
2194
2195 err = clk_prepare_enable(*axi_clk);
2196 if (err) {
Lars-Peter Clausen574897d2017-08-31 13:35:10 +02002197 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302198 return err;
2199 }
2200
2201 err = clk_prepare_enable(*dev_clk);
2202 if (err) {
Lars-Peter Clausen574897d2017-08-31 13:35:10 +02002203 dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err);
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302204 goto err_disable_axiclk;
2205 }
2206
2207 return 0;
2208
2209err_disable_axiclk:
2210 clk_disable_unprepare(*axi_clk);
2211
2212 return err;
2213}
2214
2215static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2216 struct clk **tx_clk, struct clk **txs_clk,
2217 struct clk **rx_clk, struct clk **rxs_clk)
2218{
2219 int err;
2220
2221 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2222 if (IS_ERR(*axi_clk)) {
2223 err = PTR_ERR(*axi_clk);
Radhey Shyam Pandey944879b2019-09-26 16:21:00 +05302224 if (err != -EPROBE_DEFER)
2225 dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n",
2226 err);
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302227 return err;
2228 }
2229
2230 *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
2231 if (IS_ERR(*tx_clk))
2232 *tx_clk = NULL;
2233
2234 *txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
2235 if (IS_ERR(*txs_clk))
2236 *txs_clk = NULL;
2237
2238 *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
2239 if (IS_ERR(*rx_clk))
2240 *rx_clk = NULL;
2241
2242 *rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
2243 if (IS_ERR(*rxs_clk))
2244 *rxs_clk = NULL;
2245
2246 err = clk_prepare_enable(*axi_clk);
2247 if (err) {
Radhey Shyam Pandey944879b2019-09-26 16:21:00 +05302248 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n",
2249 err);
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302250 return err;
2251 }
2252
2253 err = clk_prepare_enable(*tx_clk);
2254 if (err) {
Lars-Peter Clausen574897d2017-08-31 13:35:10 +02002255 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302256 goto err_disable_axiclk;
2257 }
2258
2259 err = clk_prepare_enable(*txs_clk);
2260 if (err) {
Lars-Peter Clausen574897d2017-08-31 13:35:10 +02002261 dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err);
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302262 goto err_disable_txclk;
2263 }
2264
2265 err = clk_prepare_enable(*rx_clk);
2266 if (err) {
Lars-Peter Clausen574897d2017-08-31 13:35:10 +02002267 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302268 goto err_disable_txsclk;
2269 }
2270
2271 err = clk_prepare_enable(*rxs_clk);
2272 if (err) {
Lars-Peter Clausen574897d2017-08-31 13:35:10 +02002273 dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err);
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302274 goto err_disable_rxclk;
2275 }
2276
2277 return 0;
2278
2279err_disable_rxclk:
2280 clk_disable_unprepare(*rx_clk);
2281err_disable_txsclk:
2282 clk_disable_unprepare(*txs_clk);
2283err_disable_txclk:
2284 clk_disable_unprepare(*tx_clk);
2285err_disable_axiclk:
2286 clk_disable_unprepare(*axi_clk);
2287
2288 return err;
2289}
2290
2291static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
2292{
2293 clk_disable_unprepare(xdev->rxs_clk);
2294 clk_disable_unprepare(xdev->rx_clk);
2295 clk_disable_unprepare(xdev->txs_clk);
2296 clk_disable_unprepare(xdev->tx_clk);
2297 clk_disable_unprepare(xdev->axi_clk);
2298}
2299
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302300/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302301 * xilinx_dma_chan_probe - Per Channel Probing
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302302 * It gets the channel features from the device tree entry and
2303 * initializes the special channel handling routines
2304 *
2305 * @xdev: Driver specific device structure
2306 * @node: Device node
Kedareswara rao Appanae50a0ad2017-12-07 10:51:05 +05302307 * @chan_id: DMA Channel id
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302308 *
2309 * Return: '0' on success and failure value on error
2310 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302311static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
Kedareswara rao Appana1a9e7a02016-06-24 10:51:23 +05302312 struct device_node *node, int chan_id)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302313{
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302314 struct xilinx_dma_chan *chan;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302315 bool has_dre = false;
2316 u32 value, width;
2317 int err;
2318
2319 /* Allocate and initialize the channel structure */
2320 chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
2321 if (!chan)
2322 return -ENOMEM;
2323
2324 chan->dev = xdev->dev;
2325 chan->xdev = xdev;
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05302326 chan->desc_pendingcount = 0x0;
Kedareswara rao Appanab72db402016-04-06 10:38:08 +05302327 chan->ext_addr = xdev->ext_addr;
Vinod Koul30931862017-12-18 10:48:05 +05302328	/* This variable ensures that descriptors are not
2329	 * submitted when the DMA engine is in progress. It is
2330	 * added to avoid polling for a bit in the status register to
Kedareswara rao Appana21e02a32017-12-07 10:51:02 +05302331	 * know the DMA state in the driver hot path.
2332 */
2333 chan->idle = true;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302334
2335 spin_lock_init(&chan->lock);
2336 INIT_LIST_HEAD(&chan->pending_list);
2337 INIT_LIST_HEAD(&chan->done_list);
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05302338 INIT_LIST_HEAD(&chan->active_list);
Kedareswara rao Appana23059402017-12-07 10:51:04 +05302339 INIT_LIST_HEAD(&chan->free_seg_list);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302340
2341 /* Retrieve the channel properties from the device tree */
2342 has_dre = of_property_read_bool(node, "xlnx,include-dre");
2343
2344 chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");
2345
2346 err = of_property_read_u32(node, "xlnx,datawidth", &value);
2347 if (err) {
2348 dev_err(xdev->dev, "missing xlnx,datawidth property\n");
2349 return err;
2350 }
2351 width = value >> 3; /* Convert bits to bytes */
2352
2353 /* If data width is greater than 8 bytes, DRE is not in hw */
2354 if (width > 8)
2355 has_dre = false;
2356
2357 if (!has_dre)
2358 xdev->common.copy_align = fls(width - 1);
2359
Kedareswara rao Appanae131f1b2016-06-24 10:51:26 +05302360 if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
2361 of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
2362 of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302363 chan->direction = DMA_MEM_TO_DEV;
Kedareswara rao Appana1a9e7a02016-06-24 10:51:23 +05302364 chan->id = chan_id;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302365
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302366 chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302367 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302368 chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
Kedareswara rao Appanafe0503e2017-12-07 10:51:03 +05302369 chan->config.park = 1;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302370
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302371 if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
2372 xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
2373 chan->flush_on_fsync = true;
2374 }
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302375 } else if (of_device_is_compatible(node,
Kedareswara rao Appanae131f1b2016-06-24 10:51:26 +05302376 "xlnx,axi-vdma-s2mm-channel") ||
2377 of_device_is_compatible(node,
2378 "xlnx,axi-dma-s2mm-channel")) {
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302379 chan->direction = DMA_DEV_TO_MEM;
Kedareswara rao Appana1a9e7a02016-06-24 10:51:23 +05302380 chan->id = chan_id;
Radhey Shyam Pandey0894aa22018-06-13 13:04:48 +05302381 chan->has_vflip = of_property_read_bool(node,
2382 "xlnx,enable-vert-flip");
2383 if (chan->has_vflip) {
2384 chan->config.vflip_en = dma_read(chan,
2385 XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP) &
2386 XILINX_VDMA_ENABLE_VERTICAL_FLIP;
2387 }
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302388
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302389 chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302390 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302391 chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
Kedareswara rao Appanafe0503e2017-12-07 10:51:03 +05302392 chan->config.park = 1;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302393
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302394 if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
2395 xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
2396 chan->flush_on_fsync = true;
2397 }
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302398 } else {
2399 dev_err(xdev->dev, "Invalid channel compatible node\n");
2400 return -EINVAL;
2401 }
2402
2403 /* Request the interrupt */
2404 chan->irq = irq_of_parse_and_map(node, 0);
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302405 err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED,
2406 "xilinx-dma-controller", chan);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302407 if (err) {
2408 dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
2409 return err;
2410 }
2411
Akinobu Mita676f9c22017-03-14 00:59:11 +09002412 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302413 chan->start_transfer = xilinx_dma_start_transfer;
Akinobu Mita676f9c22017-03-14 00:59:11 +09002414 chan->stop_transfer = xilinx_dma_stop_transfer;
2415 } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05302416 chan->start_transfer = xilinx_cdma_start_transfer;
Akinobu Mita676f9c22017-03-14 00:59:11 +09002417 chan->stop_transfer = xilinx_cdma_stop_transfer;
2418 } else {
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302419 chan->start_transfer = xilinx_vdma_start_transfer;
Akinobu Mita676f9c22017-03-14 00:59:11 +09002420 chan->stop_transfer = xilinx_dma_stop_transfer;
2421 }
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302422
Andrea Merello05f7ea72018-11-20 16:31:49 +01002423 /* check if SG is enabled (only for AXIDMA and CDMA) */
2424 if (xdev->dma_config->dmatype != XDMA_TYPE_VDMA) {
2425 if (dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
2426 XILINX_DMA_DMASR_SG_MASK)
2427 chan->has_sg = true;
2428 dev_dbg(chan->dev, "ch %d: SG %s\n", chan->id,
2429 chan->has_sg ? "enabled" : "disabled");
2430 }
2431
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302432 /* Initialize the tasklet */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302433 tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302434 (unsigned long)chan);
2435
2436 /*
2437 * Initialize the DMA channel and add it to the DMA engine channels
2438 * list.
2439 */
2440 chan->common.device = &xdev->common;
2441
2442 list_add_tail(&chan->common.device_node, &xdev->common.channels);
2443 xdev->chan[chan->id] = chan;
2444
2445 /* Reset the channel */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302446 err = xilinx_dma_chan_reset(chan);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302447 if (err < 0) {
2448 dev_err(xdev->dev, "Reset channel failed\n");
2449 return err;
2450 }
2451
2452 return 0;
2453}
2454
2455/**
Kedareswara rao Appana1a9e7a02016-06-24 10:51:23 +05302456 * xilinx_dma_child_probe - Per child node probe
2457 * It gets the number of dma-channels per child node from
2458 * the device tree and initializes all the channels.
2459 *
2460 * @xdev: Driver specific device structure
2461 * @node: Device node
2462 *
2463 * Return: 0 always.
2464 */
2465static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
Kedareswara rao Appana22653af2017-12-07 10:51:06 +05302466 struct device_node *node)
2467{
Radhey Shyam Pandeybcb2dc72019-10-22 22:30:20 +05302468 int i, nr_channels = 1;
Kedareswara rao Appana1a9e7a02016-06-24 10:51:23 +05302469
2470 for (i = 0; i < nr_channels; i++)
2471 xilinx_dma_chan_probe(xdev, node, xdev->chan_id++);
2472
2473 xdev->nr_channels += nr_channels;
2474
2475 return 0;
2476}
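/*
 * Illustrative sketch (not part of this driver): the kernel-doc above
 * mentions reading a per-child channel count from the device tree,
 * while this snapshot simply defaults nr_channels to 1. A typical read
 * of such an optional property (the property name is assumed here for
 * illustration only) would be:
 *
 *	u32 nr_channels = 1;
 *
 *	of_property_read_u32(node, "dma-channels", &nr_channels);
 *
 * of_property_read_u32() leaves nr_channels at its default of 1 when
 * the property is absent.
 */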
2477
2478/**
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302479 * of_dma_xilinx_xlate - Translation function
2480 * @dma_spec: Pointer to DMA specifier as found in the device tree
2481 * @ofdma: Pointer to DMA controller data
2482 *
2483 * Return: DMA channel pointer on success and NULL on error
2484 */
2485static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
2486 struct of_dma *ofdma)
2487{
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302488 struct xilinx_dma_device *xdev = ofdma->of_dma_data;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302489 int chan_id = dma_spec->args[0];
2490
Kedareswara rao Appana1a9e7a02016-06-24 10:51:23 +05302491 if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id])
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302492 return NULL;
2493
2494 return dma_get_slave_channel(&xdev->chan[chan_id]->common);
2495}
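/*
 * Illustrative sketch (not part of this driver): consumers reach this
 * translation function through the generic dmaengine device-tree
 * helpers; the single specifier cell is the channel index validated
 * above. The client device pointer and request name below are
 * assumptions for the example only.
 *
 *	struct dma_chan *chan;
 *
 *	chan = dma_request_chan(&client_pdev->dev, "video-out");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *	...
 *	dma_release_channel(chan);
 */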
2496
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302497static const struct xilinx_dma_config axidma_config = {
2498 .dmatype = XDMA_TYPE_AXIDMA,
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302499 .clk_init = axidma_clk_init,
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302500};
2501
2502static const struct xilinx_dma_config axicdma_config = {
2503 .dmatype = XDMA_TYPE_CDMA,
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302504 .clk_init = axicdma_clk_init,
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302505};
2506
2507static const struct xilinx_dma_config axivdma_config = {
2508 .dmatype = XDMA_TYPE_VDMA,
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302509 .clk_init = axivdma_clk_init,
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302510};
2511
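/*
 * The compatible string of the DMA node selects one of the per-IP
 * configurations above, which fixes the DMA type and the clock
 * initialization helper used during probe.
 */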
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302512static const struct of_device_id xilinx_dma_of_ids[] = {
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302513 { .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
2514 { .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
2515 { .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302516 {}
2517};
2518MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
2519
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302520/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302521 * xilinx_dma_probe - Driver probe function
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302522 * @pdev: Pointer to the platform_device structure
2523 *
2524 * Return: '0' on success and a negative error value on failure
2525 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302526static int xilinx_dma_probe(struct platform_device *pdev)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302527{
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302528 int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
2529 struct clk **, struct clk **, struct clk **)
2530 = axivdma_clk_init;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302531 struct device_node *node = pdev->dev.of_node;
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302532 struct xilinx_dma_device *xdev;
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302533 struct device_node *child, *np = pdev->dev.of_node;
Radhey Shyam Pandeyae809692018-11-20 16:31:48 +01002534 u32 num_frames, addr_width, len_width;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302535 int i, err;
2536
2537 /* Allocate and initialize the DMA engine structure */
2538 xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
2539 if (!xdev)
2540 return -ENOMEM;
2541
2542 xdev->dev = &pdev->dev;
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302543 if (np) {
2544 const struct of_device_id *match;
2545
2546 match = of_match_node(xilinx_dma_of_ids, np);
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302547 if (match && match->data) {
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302548 xdev->dma_config = match->data;
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302549 clk_init = xdev->dma_config->clk_init;
2550 }
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302551 }
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302552
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302553 err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
2554 &xdev->rx_clk, &xdev->rxs_clk);
2555 if (err)
2556 return err;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302557
2558 /* Request and map I/O memory */
Radhey Shyam Pandeya8bd4752019-09-26 16:20:59 +05302559 xdev->regs = devm_platform_ioremap_resource(pdev, 0);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302560 if (IS_ERR(xdev->regs))
2561 return PTR_ERR(xdev->regs);
2562
2563 /* Retrieve the DMA engine properties from the device tree */
Radhey Shyam Pandeyae809692018-11-20 16:31:48 +01002564 xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
Andrea Merello616f0f82018-11-20 16:31:45 +01002565
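	/*
	 * For AXI DMA the default transfer length mask set above may be
	 * widened through the optional xlnx,sg-length-width property;
	 * out-of-range values are ignored with a warning.
	 */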
Radhey Shyam Pandeyae809692018-11-20 16:31:48 +01002566 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
Radhey Shyam Pandeyae809692018-11-20 16:31:48 +01002567 if (!of_property_read_u32(node, "xlnx,sg-length-width",
2568 &len_width)) {
2569 if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
2570 len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX) {
2571 dev_warn(xdev->dev,
2572 "invalid xlnx,sg-length-width property value. Using default width\n");
2573 } else {
2574 if (len_width > XILINX_DMA_MAX_TRANS_LEN_MAX)
2575 dev_warn(xdev->dev, "Please ensure that IP supports buffer length > 23 bits\n");
2576 xdev->max_buffer_len =
2577 GENMASK(len_width - 1, 0);
2578 }
2579 }
2580 }
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302581
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302582 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302583 err = of_property_read_u32(node, "xlnx,num-fstores",
2584 &num_frames);
2585 if (err < 0) {
2586 dev_err(xdev->dev,
2587 "missing xlnx,num-fstores property\n");
2588 return err;
2589 }
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302590
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302591 err = of_property_read_u32(node, "xlnx,flush-fsync",
2592 &xdev->flush_on_fsync);
2593 if (err < 0)
2594 dev_warn(xdev->dev,
2595 "missing xlnx,flush-fsync property\n");
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302596 }
2597
Kedareswara rao Appanab72db402016-04-06 10:38:08 +05302598 err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302599 if (err < 0)
Kedareswara rao Appanab72db402016-04-06 10:38:08 +05302600 dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");
2601
2602 if (addr_width > 32)
2603 xdev->ext_addr = true;
2604 else
2605 xdev->ext_addr = false;
2606
2607 /* Set the dma mask bits */
2608 dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width));
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302609
2610 /* Initialize the DMA engine */
2611 xdev->common.dev = &pdev->dev;
2612
2613 INIT_LIST_HEAD(&xdev->common.channels);
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302614	if (xdev->dma_config->dmatype != XDMA_TYPE_CDMA) {
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05302615 dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
2616 dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
2617 }
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302618
2619 xdev->common.device_alloc_chan_resources =
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302620 xilinx_dma_alloc_chan_resources;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302621 xdev->common.device_free_chan_resources =
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302622 xilinx_dma_free_chan_resources;
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302623 xdev->common.device_terminate_all = xilinx_dma_terminate_all;
2624 xdev->common.device_tx_status = xilinx_dma_tx_status;
2625 xdev->common.device_issue_pending = xilinx_dma_issue_pending;
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302626 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05302627 dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302628 xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05302629 xdev->common.device_prep_dma_cyclic =
2630 xilinx_dma_prep_dma_cyclic;
Nicholas Graumanna575d0b2019-10-15 20:18:21 +05302631 /* Residue calculation is supported by only AXI DMA and CDMA */
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302632 xdev->common.residue_granularity =
2633 DMA_RESIDUE_GRANULARITY_SEGMENT;
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302634 } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05302635 dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
2636 xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
Nicholas Graumanna575d0b2019-10-15 20:18:21 +05302637 /* Residue calculation is supported by only AXI DMA and CDMA */
2638 xdev->common.residue_granularity =
2639 DMA_RESIDUE_GRANULARITY_SEGMENT;
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302640 } else {
2641 xdev->common.device_prep_interleaved_dma =
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302642 xilinx_vdma_dma_prep_interleaved;
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302643 }
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302644
2645 platform_set_drvdata(pdev, xdev);
2646
2647 /* Initialize the channels */
2648 for_each_child_of_node(node, child) {
Kedareswara rao Appana1a9e7a02016-06-24 10:51:23 +05302649 err = xilinx_dma_child_probe(xdev, child);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302650 if (err < 0)
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302651 goto disable_clks;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302652 }
2653
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302654 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
Kedareswara rao Appana1a9e7a02016-06-24 10:51:23 +05302655 for (i = 0; i < xdev->nr_channels; i++)
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302656 if (xdev->chan[i])
2657 xdev->chan[i]->num_frms = num_frames;
2658 }
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302659
2660 /* Register the DMA engine with the core */
2661 dma_async_device_register(&xdev->common);
2662
2663 err = of_dma_controller_register(node, of_dma_xilinx_xlate,
2664 xdev);
2665 if (err < 0) {
2666 dev_err(&pdev->dev, "Unable to register DMA to DT\n");
2667 dma_async_device_unregister(&xdev->common);
2668 goto error;
2669 }
2670
Kedareswara rao Appanac7a03592017-12-07 10:51:07 +05302671 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
2672 dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n");
2673 else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
2674 dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n");
2675 else
2676 dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302677
2678 return 0;
2679
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302680disable_clks:
2681 xdma_disable_allclks(xdev);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302682error:
Kedareswara rao Appana1a9e7a02016-06-24 10:51:23 +05302683 for (i = 0; i < xdev->nr_channels; i++)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302684 if (xdev->chan[i])
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302685 xilinx_dma_chan_remove(xdev->chan[i]);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302686
2687 return err;
2688}
2689
2690/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302691 * xilinx_dma_remove - Driver remove function
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302692 * @pdev: Pointer to the platform_device structure
2693 *
2694 * Return: Always '0'
2695 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302696static int xilinx_dma_remove(struct platform_device *pdev)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302697{
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302698 struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302699 int i;
2700
2701 of_dma_controller_free(pdev->dev.of_node);
2702
2703 dma_async_device_unregister(&xdev->common);
2704
Kedareswara rao Appana1a9e7a02016-06-24 10:51:23 +05302705 for (i = 0; i < xdev->nr_channels; i++)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302706 if (xdev->chan[i])
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302707 xilinx_dma_chan_remove(xdev->chan[i]);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302708
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302709 xdma_disable_allclks(xdev);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302710
2711 return 0;
2712}
2713
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302714static struct platform_driver xilinx_vdma_driver = {
2715 .driver = {
2716 .name = "xilinx-vdma",
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302717 .of_match_table = xilinx_dma_of_ids,
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302718 },
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302719 .probe = xilinx_dma_probe,
2720 .remove = xilinx_dma_remove,
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302721};
2722
2723module_platform_driver(xilinx_vdma_driver);
2724
2725MODULE_AUTHOR("Xilinx, Inc.");
2726MODULE_DESCRIPTION("Xilinx VDMA driver");
2727MODULE_LICENSE("GPL v2");
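/*
 * Illustrative sketch (not part of this driver): how a video client
 * might drive one VDMA channel exported by this engine, using the
 * interleaved-template prep callback registered in probe and the VDMA
 * channel-config helper from the driver's public header. The client
 * struct, function name and buffer geometry are assumptions for the
 * example only.
 *
 *	#include <linux/dmaengine.h>
 *	#include <linux/dma/xilinx_dma.h>
 *
 *	struct client_dma {
 *		struct dma_chan *chan;
 *		struct dma_interleaved_template xt;
 *		struct data_chunk sgl[1];	// storage for xt.sgl[0]
 *	};
 *
 *	static int client_queue_frame(struct client_dma *d, dma_addr_t buf,
 *				      size_t hsize, size_t stride,
 *				      size_t vsize)
 *	{
 *		// park = 0: circulate through all configured frame stores
 *		struct xilinx_vdma_config cfg = { .park = 0 };
 *		struct dma_async_tx_descriptor *tx;
 *
 *		xilinx_vdma_channel_set_config(d->chan, &cfg);
 *
 *		d->xt.dir = DMA_MEM_TO_DEV;
 *		d->xt.src_start = buf;
 *		d->xt.numf = vsize;		// lines per frame
 *		d->xt.frame_size = 1;		// one chunk per line
 *		d->xt.sgl[0].size = hsize;	// bytes per line
 *		d->xt.sgl[0].icg = stride - hsize;
 *
 *		tx = dmaengine_prep_interleaved_dma(d->chan, &d->xt,
 *						    DMA_PREP_INTERRUPT);
 *		if (!tx)
 *			return -ENOMEM;
 *
 *		dmaengine_submit(tx);
 *		dma_async_issue_pending(d->chan);
 *		return 0;
 *	}
 */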