blob: 3b622d660c8b0da008a953d08ee14084adbe16b6 [file] [log] [blame]
Fabio Estevamc01faac2018-05-21 23:53:30 -03001// SPDX-License-Identifier: GPL-2.0+
2//
3// drivers/dma/imx-sdma.c
4//
5// This file contains a driver for the Freescale Smart DMA engine
6//
7// Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
8//
9// Based on code from Freescale:
10//
11// Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
Sascha Hauer1ec1e822010-09-30 13:56:34 +000012
13#include <linux/init.h>
Michael Olbrich1d069bf2016-07-07 11:35:51 +020014#include <linux/iopoll.h>
Axel Linf8de8f42011-08-30 15:08:24 +080015#include <linux/module.h>
Sascha Hauer1ec1e822010-09-30 13:56:34 +000016#include <linux/types.h>
Richard Zhao0bbc1412012-01-13 11:10:01 +080017#include <linux/bitops.h>
Sascha Hauer1ec1e822010-09-30 13:56:34 +000018#include <linux/mm.h>
19#include <linux/interrupt.h>
20#include <linux/clk.h>
Richard Zhao2ccaef02012-05-11 15:14:27 +080021#include <linux/delay.h>
Sascha Hauer1ec1e822010-09-30 13:56:34 +000022#include <linux/sched.h>
23#include <linux/semaphore.h>
24#include <linux/spinlock.h>
25#include <linux/device.h>
26#include <linux/dma-mapping.h>
Robin Gongfe5b85c2018-06-20 00:57:04 +080027#include <linux/dmapool.h>
Sascha Hauer1ec1e822010-09-30 13:56:34 +000028#include <linux/firmware.h>
29#include <linux/slab.h>
30#include <linux/platform_device.h>
31#include <linux/dmaengine.h>
Shawn Guo580975d2011-07-14 08:35:48 +080032#include <linux/of.h>
Shengjiu Wang8391ecf2015-07-10 17:08:16 +080033#include <linux/of_address.h>
Shawn Guo580975d2011-07-14 08:35:48 +080034#include <linux/of_device.h>
Shawn Guo9479e172013-05-30 22:23:32 +080035#include <linux/of_dma.h>
Sascha Hauer1ec1e822010-09-30 13:56:34 +000036
37#include <asm/irq.h>
Arnd Bergmann82906b12012-08-24 15:14:29 +020038#include <linux/platform_data/dma-imx-sdma.h>
39#include <linux/platform_data/dma-imx.h>
Zidan Wangd078cd12015-07-23 11:40:49 +080040#include <linux/regmap.h>
41#include <linux/mfd/syscon.h>
42#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
Sascha Hauer1ec1e822010-09-30 13:56:34 +000043
Russell King - ARM Linuxd2ebfb32012-03-06 22:34:26 +000044#include "dmaengine.h"
Robin Gong57b772b2018-06-20 00:57:00 +080045#include "virt-dma.h"
Russell King - ARM Linuxd2ebfb32012-03-06 22:34:26 +000046
/* SDMA registers (byte offsets from the MMIO base, sdma->regs) */
#define SDMA_H_C0PTR		0x000
#define SDMA_H_INTR		0x004
#define SDMA_H_STATSTOP		0x008
#define SDMA_H_START		0x00c
#define SDMA_H_EVTOVR		0x010
#define SDMA_H_DSPOVR		0x014
#define SDMA_H_HOSTOVR		0x018
#define SDMA_H_EVTPEND		0x01c
#define SDMA_H_DSPENBL		0x020
#define SDMA_H_RESET		0x024
#define SDMA_H_EVTERR		0x028
#define SDMA_H_INTRMSK		0x02c
#define SDMA_H_PSW		0x030
#define SDMA_H_EVTERRDBG	0x034
#define SDMA_H_CONFIG		0x038
#define SDMA_ONCE_ENB		0x040
#define SDMA_ONCE_DATA		0x044
#define SDMA_ONCE_INSTR		0x048
#define SDMA_ONCE_STAT		0x04c
#define SDMA_ONCE_CMD		0x050
#define SDMA_EVT_MIRROR		0x054
#define SDMA_ILLINSTADDR	0x058
#define SDMA_CHN0ADDR		0x05c
#define SDMA_ONCE_RTB		0x060
#define SDMA_XTRIG_CONF1	0x070
#define SDMA_XTRIG_CONF2	0x074
/* The channel-enable register block starts at a SoC-dependent offset. */
#define SDMA_CHNENBL0_IMX35	0x200
#define SDMA_CHNENBL0_IMX31	0x080
#define SDMA_CHNPRI_0		0x100
77
/*
 * Buffer descriptor status values.
 */
#define BD_DONE  0x01
#define BD_WRAP  0x02
#define BD_CONT  0x04
#define BD_INTR  0x08
#define BD_RROR  0x10
#define BD_LAST  0x20
#define BD_EXTD  0x80

/*
 * Data Node descriptor status values.
 */
#define DND_END_OF_FRAME  0x80
#define DND_END_OF_XFER   0x40
#define DND_DONE          0x20
#define DND_UNUSED        0x01

/*
 * IPCV2 descriptor status values.
 */
#define BD_IPCV2_END_OF_FRAME  0x40

#define IPCV2_MAX_NODES        50
/*
 * Error bit set in the CCB status field by the SDMA,
 * in setbd routine, in case of a transfer error
 */
#define DATA_ERROR  0x10000000

/*
 * Buffer descriptor commands (bd->mode.command, channel 0 scripts).
 */
#define C0_ADDR             0x01
#define C0_LOAD             0x02
#define C0_DUMP             0x03
#define C0_SETCTX           0x07
#define C0_GETCTX           0x03
#define C0_SETDM            0x01
#define C0_SETPM            0x04
#define C0_GETDM            0x02
#define C0_GETPM            0x08
/*
 * Change endianness indicator in the BD command field
 */
#define CHANGE_ENDIANNESS   0x80
125
/*
 * p_2_p watermark_level description
 *	Bits		Name			Description
 *	0-7		Lower WML		Lower watermark level
 *	8		PS			1: Pad Swallowing
 *						0: No Pad Swallowing
 *	9		PA			1: Pad Adding
 *						0: No Pad Adding
 *	10		SPDIF			If this bit is set both source
 *						and destination are on SPBA
 *	11		Source Bit(SP)		1: Source on SPBA
 *						0: Source on AIPS
 *	12		Destination Bit(DP)	1: Destination on SPBA
 *						0: Destination on AIPS
 *	13-15		---------		MUST BE 0
 *	16-23		Higher WML		HWML
 *	24-27		N			Total number of samples after
 *						which Pad adding/Swallowing
 *						must be done. It must be odd.
 *	28		Lower WML Event(LWE)	SDMA events reg to check for
 *						LWML event mask
 *						0: LWE in EVENTS register
 *						1: LWE in EVENTS2 register
 *	29		Higher WML Event(HWE)	SDMA events reg to check for
 *						HWML event mask
 *						0: HWE in EVENTS register
 *						1: HWE in EVENTS2 register
 *	30		---------		MUST BE 0
 *	31		CONT			1: Amount of samples to be
 *						transferred is unknown and
 *						script will keep on
 *						transferring samples as long as
 *						both events are detected and
 *						script must be manually stopped
 *						by the application
 *						0: The amount of samples to be
 *						transferred is equal to the
 *						count field of mode word
 */
#define SDMA_WATERMARK_LEVEL_LWML	0xFF
#define SDMA_WATERMARK_LEVEL_PS		BIT(8)
#define SDMA_WATERMARK_LEVEL_PA		BIT(9)
#define SDMA_WATERMARK_LEVEL_SPDIF	BIT(10)
#define SDMA_WATERMARK_LEVEL_SP		BIT(11)
#define SDMA_WATERMARK_LEVEL_DP		BIT(12)
#define SDMA_WATERMARK_LEVEL_HWML	(0xFF << 16)
#define SDMA_WATERMARK_LEVEL_LWE	BIT(28)
#define SDMA_WATERMARK_LEVEL_HWE	BIT(29)
#define SDMA_WATERMARK_LEVEL_CONT	BIT(31)

/* Bus widths and transfer directions advertised to the dmaengine core. */
#define SDMA_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

#define SDMA_DMA_DIRECTIONS	(BIT(DMA_DEV_TO_MEM) | \
				 BIT(DMA_MEM_TO_DEV) | \
				 BIT(DMA_DEV_TO_DEV))
183
/*
 * Mode/Count of data node descriptors - IPCv2
 */
struct sdma_mode_count {
	u32 count   : 16; /* size of the buffer pointed by this BD */
	u32 status  :  8; /* E,R,I,C,W,D status bits stored here */
	u32 command :  8; /* command mostly used for channel 0 */
};

/*
 * Buffer descriptor — layout shared with the SDMA hardware, hence packed.
 */
struct sdma_buffer_descriptor {
	struct sdma_mode_count  mode;
	u32 buffer_addr;	/* address of the buffer described */
	u32 ext_buffer_addr;	/* extended buffer address */
} __attribute__ ((packed));
201
/**
 * struct sdma_channel_control - Channel control Block
 *
 * @current_bd_ptr:	current buffer descriptor processed
 * @base_bd_ptr:	first element of buffer descriptor array
 * @unused:		padding. The SDMA engine expects an array of 128 byte
 *			control blocks
 */
struct sdma_channel_control {
	u32 current_bd_ptr;
	u32 base_bd_ptr;
	u32 unused[2];
} __attribute__ ((packed));
215
/**
 * struct sdma_state_registers - SDMA context for a channel
 *
 * @pc:		program counter
 * @unused1:	unused
 * @t:		test bit: status of arithmetic & test instruction
 * @rpc:	return program counter
 * @unused0:	unused
 * @sf:		source fault while loading data
 * @spc:	loop start program counter
 * @unused2:	unused
 * @df:		destination fault while storing data
 * @epc:	loop end program counter
 * @lm:		loop mode
 */
struct sdma_state_registers {
	u32 pc     :14;
	u32 unused1: 1;
	u32 t      : 1;
	u32 rpc    :14;
	u32 unused0: 1;
	u32 sf     : 1;
	u32 spc    :14;
	u32 unused2: 1;
	u32 df     : 1;
	u32 epc    :14;
	u32 lm     : 2;
} __attribute__ ((packed));
244
/**
 * struct sdma_context_data - sdma context specific to a channel
 *
 * @channel_state:	channel state bits
 * @gReg:		general registers
 * @mda:		burst dma destination address register
 * @msa:		burst dma source address register
 * @ms:			burst dma status register
 * @md:			burst dma data register
 * @pda:		peripheral dma destination address register
 * @psa:		peripheral dma source address register
 * @ps:			peripheral dma status register
 * @pd:			peripheral dma data register
 * @ca:			CRC polynomial register
 * @cs:			CRC accumulator register
 * @dda:		dedicated core destination address register
 * @dsa:		dedicated core source address register
 * @ds:			dedicated core status register
 * @dd:			dedicated core data register
 * @scratch0:		1st word of dedicated ram for context switch
 * @scratch1:		2nd word of dedicated ram for context switch
 * @scratch2:		3rd word of dedicated ram for context switch
 * @scratch3:		4th word of dedicated ram for context switch
 * @scratch4:		5th word of dedicated ram for context switch
 * @scratch5:		6th word of dedicated ram for context switch
 * @scratch6:		7th word of dedicated ram for context switch
 * @scratch7:		8th word of dedicated ram for context switch
 */
struct sdma_context_data {
	struct sdma_state_registers  channel_state;
	u32  gReg[8];
	u32  mda;
	u32  msa;
	u32  ms;
	u32  md;
	u32  pda;
	u32  psa;
	u32  ps;
	u32  pd;
	u32  ca;
	u32  cs;
	u32  dda;
	u32  dsa;
	u32  ds;
	u32  dd;
	u32  scratch0;
	u32  scratch1;
	u32  scratch2;
	u32  scratch3;
	u32  scratch4;
	u32  scratch5;
	u32  scratch6;
	u32  scratch7;
} __attribute__ ((packed));
299
Sascha Hauer1ec1e822010-09-30 13:56:34 +0000300
struct sdma_engine;

/**
 * struct sdma_desc - descriptor structure for one transfer
 * @vd:			descriptor for virt dma
 * @num_bd:		number of descriptors currently handling
 * @bd_phys:		physical address of bd
 * @buf_tail:		ID of the buffer that was processed
 * @buf_ptail:		ID of the previous buffer that was processed
 * @period_len:		period length, used in cyclic.
 * @chn_real_count:	the real count updated from bd->mode.count
 * @chn_count:		the transfer count set
 * @sdmac:		sdma_channel pointer
 * @bd:			pointer of allocate bd
 */
struct sdma_desc {
	struct virt_dma_desc	vd;
	unsigned int		num_bd;
	dma_addr_t		bd_phys;
	unsigned int		buf_tail;
	unsigned int		buf_ptail;
	unsigned int		period_len;
	unsigned int		chn_real_count;
	unsigned int		chn_count;
	struct sdma_channel	*sdmac;
	struct sdma_buffer_descriptor *bd;
};
328
/**
 * struct sdma_channel - housekeeping for a SDMA channel
 *
 * @vc:			virt_dma base structure
 * @desc:		sdma description including vd and other special member
 * @sdma:		pointer to the SDMA engine for this channel
 * @channel:		the channel number, matches dmaengine chan_id + 1
 * @direction:		transfer type. Needed for setting SDMA script
 * @peripheral_type:	Peripheral type. Needed for setting SDMA script
 * @event_id0:		aka dma request line
 * @event_id1:		for channels that use 2 events
 * @word_size:		peripheral access size
 * @pc_from_device:	script address for those device_2_memory
 * @pc_to_device:	script address for those memory_2_device
 * @device_to_device:	script address for those device_2_device
 * @flags:		loop mode or not (IMX_DMA_SG_LOOP)
 * @per_address:	peripheral source or destination address in common case
 *			destination address in p_2_p case
 * @per_address2:	peripheral source address in p_2_p case
 * @event_mask:		event mask used in p_2_p script
 * @watermark_level:	value for gReg[7], some script will extend it from
 *			basic watermark such as p_2_p
 * @shp_addr:		value for gReg[6]
 * @per_addr:		value for gReg[2]
 * @status:		status of dma channel
 * @data:		specific sdma interface structure
 * @bd_pool:		dma_pool for bd
 */
struct sdma_channel {
	struct virt_dma_chan		vc;
	struct sdma_desc		*desc;
	struct sdma_engine		*sdma;
	unsigned int			channel;
	enum dma_transfer_direction	direction;
	enum sdma_peripheral_type	peripheral_type;
	unsigned int			event_id0;
	unsigned int			event_id1;
	enum dma_slave_buswidth		word_size;
	unsigned int			pc_from_device, pc_to_device;
	unsigned int			device_to_device;
	unsigned long			flags;
	dma_addr_t			per_address, per_address2;
	unsigned long			event_mask[2];
	unsigned long			watermark_level;
	u32				shp_addr, per_addr;
	enum dma_status			status;
	struct imx_dma_data		data;
	struct dma_pool			*bd_pool;
};
378
/* Channel flag: descriptor describes a cyclic (looping) transfer. */
#define IMX_DMA_SG_LOOP		BIT(0)

#define MAX_DMA_CHANNELS 32
#define MXC_SDMA_DEFAULT_PRIORITY 1
#define MXC_SDMA_MIN_PRIORITY 1
#define MXC_SDMA_MAX_PRIORITY 7

/* "SDMA" in little-endian ASCII; see sdma_firmware_header.magic. */
#define SDMA_FIRMWARE_MAGIC 0x414d4453
387
/**
 * struct sdma_firmware_header - Layout of the firmware image
 *
 * @magic:		"SDMA"
 * @version_major:	increased whenever layout of struct
 *			sdma_script_start_addrs changes.
 * @version_minor:	firmware minor version (for binary compatible changes)
 * @script_addrs_start:	offset of struct sdma_script_start_addrs in this image
 * @num_script_addrs:	Number of script addresses in this image
 * @ram_code_start:	offset of SDMA ram image in this firmware image
 * @ram_code_size:	size of SDMA ram image
 *
 * NOTE(review): the script start addresses themselves are not a member of
 * this struct; they live at @script_addrs_start within the firmware image.
 */
struct sdma_firmware_header {
	u32	magic;
	u32	version_major;
	u32	version_minor;
	u32	script_addrs_start;
	u32	num_script_addrs;
	u32	ram_code_start;
	u32	ram_code_size;
};
411
/**
 * struct sdma_driver_data - per-SoC configuration
 *
 * @chnenbl0:	offset of the first channel-enable (CHNENBL) register;
 *		SoC dependent, consumed by chnenbl_ofs()
 * @num_events:	number of hardware DMA request events on this SoC
 * @script_addrs: default script entry points for this SoC, if any
 */
struct sdma_driver_data {
	int chnenbl0;
	int num_events;
	struct sdma_script_start_addrs	*script_addrs;
};
417
/**
 * struct sdma_engine - state of one SDMA controller instance
 *
 * @dev:		the platform device's struct device
 * @dma_parms:		DMA parameters attached to @dma_device
 * @channel:		per-channel bookkeeping, one slot per HW channel
 * @channel_control:	channel control block array (base/current BD pointers
 *			programmed in sdma_start_desc())
 * @regs:		MMIO register base
 * @context:		channel context buffer; assumed to be loaded via the
 *			channel-0 context commands — usage not in this chunk
 * @context_phys:	DMA address of @context
 * @dma_device:		dmaengine device registered for this engine
 * @clk_ipg:		IPG bus clock
 * @clk_ahb:		AHB clock
 * @channel_0_lock:	serializes users of the channel-0 buffer descriptor
 *			(see sdma_load_script())
 * @script_number:	number of script addresses known for this firmware
 * @script_addrs:	resolved script entry points
 * @drvdata:		per-SoC configuration (register layout, event count)
 * @spba_start_addr:	start of the SPBA peripheral address range
 * @spba_end_addr:	end of the SPBA peripheral address range
 * @irq:		SDMA interrupt number
 * @bd0_phys:		DMA address of @bd0
 * @bd0:		buffer descriptor reserved for channel-0 commands
 */
struct sdma_engine {
	struct device			*dev;
	struct device_dma_parameters	dma_parms;
	struct sdma_channel		channel[MAX_DMA_CHANNELS];
	struct sdma_channel_control	*channel_control;
	void __iomem			*regs;
	struct sdma_context_data	*context;
	dma_addr_t			context_phys;
	struct dma_device		dma_device;
	struct clk			*clk_ipg;
	struct clk			*clk_ahb;
	spinlock_t			channel_0_lock;
	u32				script_number;
	struct sdma_script_start_addrs	*script_addrs;
	const struct sdma_driver_data	*drvdata;
	u32				spba_start_addr;
	u32				spba_end_addr;
	unsigned int			irq;
	dma_addr_t			bd0_phys;
	struct sdma_buffer_descriptor	*bd0;
};
439
/*
 * Per-SoC driver data: CHNENBL register layout, number of DMA request
 * events, and (where present) per-SoC script entry point tables.
 */
static struct sdma_driver_data sdma_imx31 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX31,
	.num_events = 32,
};

static struct sdma_script_start_addrs sdma_script_imx25 = {
	.ap_2_ap_addr = 729,
	.uart_2_mcu_addr = 904,
	.per_2_app_addr = 1255,
	.mcu_2_app_addr = 834,
	.uartsh_2_mcu_addr = 1120,
	.per_2_shp_addr = 1329,
	.mcu_2_shp_addr = 1048,
	.ata_2_mcu_addr = 1560,
	.mcu_2_ata_addr = 1479,
	.app_2_per_addr = 1189,
	.app_2_mcu_addr = 770,
	.shp_2_per_addr = 1407,
	.shp_2_mcu_addr = 979,
};

static struct sdma_driver_data sdma_imx25 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx25,
};

static struct sdma_driver_data sdma_imx35 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
};

static struct sdma_script_start_addrs sdma_script_imx51 = {
	.ap_2_ap_addr = 642,
	.uart_2_mcu_addr = 817,
	.mcu_2_app_addr = 747,
	.mcu_2_shp_addr = 961,
	.ata_2_mcu_addr = 1473,
	.mcu_2_ata_addr = 1392,
	.app_2_per_addr = 1033,
	.app_2_mcu_addr = 683,
	.shp_2_per_addr = 1251,
	.shp_2_mcu_addr = 892,
};

static struct sdma_driver_data sdma_imx51 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx51,
};

static struct sdma_script_start_addrs sdma_script_imx53 = {
	.ap_2_ap_addr = 642,
	.app_2_mcu_addr = 683,
	.mcu_2_app_addr = 747,
	.uart_2_mcu_addr = 817,
	.shp_2_mcu_addr = 891,
	.mcu_2_shp_addr = 960,
	.uartsh_2_mcu_addr = 1032,
	.spdif_2_mcu_addr = 1100,
	.mcu_2_spdif_addr = 1134,
	.firi_2_mcu_addr = 1193,
	.mcu_2_firi_addr = 1290,
};

static struct sdma_driver_data sdma_imx53 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx53,
};

static struct sdma_script_start_addrs sdma_script_imx6q = {
	.ap_2_ap_addr = 642,
	.uart_2_mcu_addr = 817,
	.mcu_2_app_addr = 747,
	.per_2_per_addr = 6331,
	.uartsh_2_mcu_addr = 1032,
	.mcu_2_shp_addr = 960,
	.app_2_mcu_addr = 683,
	.shp_2_mcu_addr = 891,
	.spdif_2_mcu_addr = 1100,
	.mcu_2_spdif_addr = 1134,
};

static struct sdma_driver_data sdma_imx6q = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx6q,
};

static struct sdma_script_start_addrs sdma_script_imx7d = {
	.ap_2_ap_addr = 644,
	.uart_2_mcu_addr = 819,
	.mcu_2_app_addr = 749,
	.uartsh_2_mcu_addr = 1034,
	.mcu_2_shp_addr = 962,
	.app_2_mcu_addr = 685,
	.shp_2_mcu_addr = 893,
	.spdif_2_mcu_addr = 1102,
	.mcu_2_spdif_addr = 1136,
};

static struct sdma_driver_data sdma_imx7d = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx7d,
};
547
/* Platform-bus id table; driver_data points at the per-SoC driver data. */
static const struct platform_device_id sdma_devtypes[] = {
	{
		.name = "imx25-sdma",
		.driver_data = (unsigned long)&sdma_imx25,
	}, {
		.name = "imx31-sdma",
		.driver_data = (unsigned long)&sdma_imx31,
	}, {
		.name = "imx35-sdma",
		.driver_data = (unsigned long)&sdma_imx35,
	}, {
		.name = "imx51-sdma",
		.driver_data = (unsigned long)&sdma_imx51,
	}, {
		.name = "imx53-sdma",
		.driver_data = (unsigned long)&sdma_imx53,
	}, {
		.name = "imx6q-sdma",
		.driver_data = (unsigned long)&sdma_imx6q,
	}, {
		.name = "imx7d-sdma",
		.driver_data = (unsigned long)&sdma_imx7d,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, sdma_devtypes);

/* Device-tree match table; .data points at the per-SoC driver data. */
static const struct of_device_id sdma_dt_ids[] = {
	{ .compatible = "fsl,imx6q-sdma", .data = &sdma_imx6q, },
	{ .compatible = "fsl,imx53-sdma", .data = &sdma_imx53, },
	{ .compatible = "fsl,imx51-sdma", .data = &sdma_imx51, },
	{ .compatible = "fsl,imx35-sdma", .data = &sdma_imx35, },
	{ .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, },
	{ .compatible = "fsl,imx25-sdma", .data = &sdma_imx25, },
	{ .compatible = "fsl,imx7d-sdma", .data = &sdma_imx7d, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sdma_dt_ids);
587
/* SDMA_H_CONFIG register bits */
#define SDMA_H_CONFIG_DSPDMA	BIT(12) /* indicates if the DSPDMA is used */
#define SDMA_H_CONFIG_RTD_PINS	BIT(11) /* indicates if Real-Time Debug pins are enabled */
#define SDMA_H_CONFIG_ACR	BIT(4)  /* indicates if AHB freq /core freq = 2 or 1 */
#define SDMA_H_CONFIG_CSM	(3)	/* indicates which context switch mode is selected*/
592
593static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
594{
Sascha Hauer17bba722013-08-20 10:04:31 +0200595 u32 chnenbl0 = sdma->drvdata->chnenbl0;
Sascha Hauer1ec1e822010-09-30 13:56:34 +0000596 return chnenbl0 + event * 4;
597}
598
599static int sdma_config_ownership(struct sdma_channel *sdmac,
600 bool event_override, bool mcu_override, bool dsp_override)
601{
602 struct sdma_engine *sdma = sdmac->sdma;
603 int channel = sdmac->channel;
Richard Zhao0bbc1412012-01-13 11:10:01 +0800604 unsigned long evt, mcu, dsp;
Sascha Hauer1ec1e822010-09-30 13:56:34 +0000605
606 if (event_override && mcu_override && dsp_override)
607 return -EINVAL;
608
Richard Zhaoc4b56852012-01-13 11:09:57 +0800609 evt = readl_relaxed(sdma->regs + SDMA_H_EVTOVR);
610 mcu = readl_relaxed(sdma->regs + SDMA_H_HOSTOVR);
611 dsp = readl_relaxed(sdma->regs + SDMA_H_DSPOVR);
Sascha Hauer1ec1e822010-09-30 13:56:34 +0000612
613 if (dsp_override)
Richard Zhao0bbc1412012-01-13 11:10:01 +0800614 __clear_bit(channel, &dsp);
Sascha Hauer1ec1e822010-09-30 13:56:34 +0000615 else
Richard Zhao0bbc1412012-01-13 11:10:01 +0800616 __set_bit(channel, &dsp);
Sascha Hauer1ec1e822010-09-30 13:56:34 +0000617
618 if (event_override)
Richard Zhao0bbc1412012-01-13 11:10:01 +0800619 __clear_bit(channel, &evt);
Sascha Hauer1ec1e822010-09-30 13:56:34 +0000620 else
Richard Zhao0bbc1412012-01-13 11:10:01 +0800621 __set_bit(channel, &evt);
Sascha Hauer1ec1e822010-09-30 13:56:34 +0000622
623 if (mcu_override)
Richard Zhao0bbc1412012-01-13 11:10:01 +0800624 __clear_bit(channel, &mcu);
Sascha Hauer1ec1e822010-09-30 13:56:34 +0000625 else
Richard Zhao0bbc1412012-01-13 11:10:01 +0800626 __set_bit(channel, &mcu);
Sascha Hauer1ec1e822010-09-30 13:56:34 +0000627
Richard Zhaoc4b56852012-01-13 11:09:57 +0800628 writel_relaxed(evt, sdma->regs + SDMA_H_EVTOVR);
629 writel_relaxed(mcu, sdma->regs + SDMA_H_HOSTOVR);
630 writel_relaxed(dsp, sdma->regs + SDMA_H_DSPOVR);
Sascha Hauer1ec1e822010-09-30 13:56:34 +0000631
632 return 0;
633}
634
Richard Zhaob9a591662012-01-13 11:09:56 +0800635static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
636{
Richard Zhao0bbc1412012-01-13 11:10:01 +0800637 writel(BIT(channel), sdma->regs + SDMA_H_START);
Richard Zhaob9a591662012-01-13 11:09:56 +0800638}
639
Sascha Hauer1ec1e822010-09-30 13:56:34 +0000640/*
Richard Zhao2ccaef02012-05-11 15:14:27 +0800641 * sdma_run_channel0 - run a channel and wait till it's done
Sascha Hauer1ec1e822010-09-30 13:56:34 +0000642 */
Richard Zhao2ccaef02012-05-11 15:14:27 +0800643static int sdma_run_channel0(struct sdma_engine *sdma)
Sascha Hauer1ec1e822010-09-30 13:56:34 +0000644{
Sascha Hauer1ec1e822010-09-30 13:56:34 +0000645 int ret;
Michael Olbrich1d069bf2016-07-07 11:35:51 +0200646 u32 reg;
Sascha Hauer1ec1e822010-09-30 13:56:34 +0000647
Richard Zhao2ccaef02012-05-11 15:14:27 +0800648 sdma_enable_channel(sdma, 0);
Sascha Hauer1ec1e822010-09-30 13:56:34 +0000649
Michael Olbrich1d069bf2016-07-07 11:35:51 +0200650 ret = readl_relaxed_poll_timeout_atomic(sdma->regs + SDMA_H_STATSTOP,
651 reg, !(reg & 1), 1, 500);
652 if (ret)
Richard Zhao2ccaef02012-05-11 15:14:27 +0800653 dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");
Sascha Hauer1ec1e822010-09-30 13:56:34 +0000654
Robin Gong855832e2015-02-15 10:00:35 +0800655 /* Set bits of CONFIG register with dynamic context switching */
656 if (readl(sdma->regs + SDMA_H_CONFIG) == 0)
657 writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);
658
Michael Olbrich1d069bf2016-07-07 11:35:51 +0200659 return ret;
Sascha Hauer1ec1e822010-09-30 13:56:34 +0000660}
661
662static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
663 u32 address)
664{
Sascha Hauer76c33d22018-06-20 00:56:59 +0800665 struct sdma_buffer_descriptor *bd0 = sdma->bd0;
Sascha Hauer1ec1e822010-09-30 13:56:34 +0000666 void *buf_virt;
667 dma_addr_t buf_phys;
668 int ret;
Richard Zhao2ccaef02012-05-11 15:14:27 +0800669 unsigned long flags;
Sascha Hauer73eab972011-08-25 11:03:35 +0200670
Sascha Hauer1ec1e822010-09-30 13:56:34 +0000671 buf_virt = dma_alloc_coherent(NULL,
672 size,
673 &buf_phys, GFP_KERNEL);
Sascha Hauer73eab972011-08-25 11:03:35 +0200674 if (!buf_virt) {
Richard Zhao2ccaef02012-05-11 15:14:27 +0800675 return -ENOMEM;
Sascha Hauer73eab972011-08-25 11:03:35 +0200676 }
Sascha Hauer1ec1e822010-09-30 13:56:34 +0000677
Richard Zhao2ccaef02012-05-11 15:14:27 +0800678 spin_lock_irqsave(&sdma->channel_0_lock, flags);
679
Sascha Hauer1ec1e822010-09-30 13:56:34 +0000680 bd0->mode.command = C0_SETPM;
681 bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
682 bd0->mode.count = size / 2;
683 bd0->buffer_addr = buf_phys;
684 bd0->ext_buffer_addr = address;
685
686 memcpy(buf_virt, buf, size);
687
Richard Zhao2ccaef02012-05-11 15:14:27 +0800688 ret = sdma_run_channel0(sdma);
689
690 spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
Sascha Hauer1ec1e822010-09-30 13:56:34 +0000691
692 dma_free_coherent(NULL, size, buf_virt, buf_phys);
693
694 return ret;
695}
696
697static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event)
698{
699 struct sdma_engine *sdma = sdmac->sdma;
700 int channel = sdmac->channel;
Richard Zhao0bbc1412012-01-13 11:10:01 +0800701 unsigned long val;
Sascha Hauer1ec1e822010-09-30 13:56:34 +0000702 u32 chnenbl = chnenbl_ofs(sdma, event);
703
Richard Zhaoc4b56852012-01-13 11:09:57 +0800704 val = readl_relaxed(sdma->regs + chnenbl);
Richard Zhao0bbc1412012-01-13 11:10:01 +0800705 __set_bit(channel, &val);
Richard Zhaoc4b56852012-01-13 11:09:57 +0800706 writel_relaxed(val, sdma->regs + chnenbl);
Sascha Hauer1ec1e822010-09-30 13:56:34 +0000707}
708
709static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
710{
711 struct sdma_engine *sdma = sdmac->sdma;
712 int channel = sdmac->channel;
713 u32 chnenbl = chnenbl_ofs(sdma, event);
Richard Zhao0bbc1412012-01-13 11:10:01 +0800714 unsigned long val;
Sascha Hauer1ec1e822010-09-30 13:56:34 +0000715
Richard Zhaoc4b56852012-01-13 11:09:57 +0800716 val = readl_relaxed(sdma->regs + chnenbl);
Richard Zhao0bbc1412012-01-13 11:10:01 +0800717 __clear_bit(channel, &val);
Richard Zhaoc4b56852012-01-13 11:09:57 +0800718 writel_relaxed(val, sdma->regs + chnenbl);
Sascha Hauer1ec1e822010-09-30 13:56:34 +0000719}
720
Robin Gong57b772b2018-06-20 00:57:00 +0800721static struct sdma_desc *to_sdma_desc(struct dma_async_tx_descriptor *t)
722{
723 return container_of(t, struct sdma_desc, vd.tx);
724}
725
726static void sdma_start_desc(struct sdma_channel *sdmac)
727{
728 struct virt_dma_desc *vd = vchan_next_desc(&sdmac->vc);
729 struct sdma_desc *desc;
730 struct sdma_engine *sdma = sdmac->sdma;
731 int channel = sdmac->channel;
732
733 if (!vd) {
734 sdmac->desc = NULL;
735 return;
736 }
737 sdmac->desc = desc = to_sdma_desc(&vd->tx);
738 /*
739 * Do not delete the node in desc_issued list in cyclic mode, otherwise
Vinod Koul680302c2018-07-02 18:34:02 +0530740 * the desc allocated will never be freed in vchan_dma_desc_free_list
Robin Gong57b772b2018-06-20 00:57:00 +0800741 */
742 if (!(sdmac->flags & IMX_DMA_SG_LOOP))
743 list_del(&vd->node);
744
745 sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
746 sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
747 sdma_enable_channel(sdma, sdmac->channel);
748}
749
/*
 * Advance the cyclic (loop-mode) buffer descriptor ring of @sdmac.
 *
 * Walks the ring from buf_tail and, for every BD the SDMA has completed
 * (BD_DONE clear), records the transferred count, re-arms the BD, advances
 * the tail and invokes the client callback.  Stops at the first BD that is
 * still owned by the SDMA.  Runs in interrupt context with sdmac->vc.lock
 * held; the lock is dropped around the client callback.
 */
static void sdma_update_channel_loop(struct sdma_channel *sdmac)
{
	struct sdma_buffer_descriptor *bd;
	int error = 0;
	enum dma_status old_status = sdmac->status;

	/*
	 * loop mode. Iterate over descriptors, re-setup them and
	 * call callback function.
	 */
	while (sdmac->desc) {
		struct sdma_desc *desc = sdmac->desc;

		bd = &desc->bd[desc->buf_tail];

		/* Still owned by the SDMA engine — nothing more to reap. */
		if (bd->mode.status & BD_DONE)
			break;

		if (bd->mode.status & BD_RROR) {
			bd->mode.status &= ~BD_RROR;
			sdmac->status = DMA_ERROR;
			error = -EIO;
		}

		/*
		 * We use bd->mode.count to calculate the residue, since it
		 * contains the number of bytes present in the current buffer
		 * descriptor.
		 */

		desc->chn_real_count = bd->mode.count;
		bd->mode.status |= BD_DONE;	/* hand the BD back to the SDMA */
		bd->mode.count = desc->period_len;
		desc->buf_ptail = desc->buf_tail;
		desc->buf_tail = (desc->buf_tail + 1) % desc->num_bd;

		/*
		 * The callback is called from the interrupt context in order
		 * to reduce latency and to avoid the risk of altering the
		 * SDMA transaction status by the time the client tasklet is
		 * executed.
		 */
		spin_unlock(&sdmac->vc.lock);
		dmaengine_desc_get_callback_invoke(&desc->vd.tx, NULL);
		spin_lock(&sdmac->vc.lock);

		/* An error in this period must not leak into later periods. */
		if (error)
			sdmac->status = old_status;
	}
}
799
Robin Gong57b772b2018-06-20 00:57:00 +0800800static void mxc_sdma_handle_channel_normal(struct sdma_channel *data)
Sascha Hauer1ec1e822010-09-30 13:56:34 +0000801{
Nandor Han15f30f52016-08-08 15:38:25 +0300802 struct sdma_channel *sdmac = (struct sdma_channel *) data;
Sascha Hauer1ec1e822010-09-30 13:56:34 +0000803 struct sdma_buffer_descriptor *bd;
804 int i, error = 0;
805
Sascha Hauer76c33d22018-06-20 00:56:59 +0800806 sdmac->desc->chn_real_count = 0;
Sascha Hauer1ec1e822010-09-30 13:56:34 +0000807 /*
808 * non loop mode. Iterate over all descriptors, collect
809 * errors and call callback function
810 */
Sascha Hauer76c33d22018-06-20 00:56:59 +0800811 for (i = 0; i < sdmac->desc->num_bd; i++) {
812 bd = &sdmac->desc->bd[i];
Sascha Hauer1ec1e822010-09-30 13:56:34 +0000813
814 if (bd->mode.status & (BD_DONE | BD_RROR))
815 error = -EIO;
Sascha Hauer76c33d22018-06-20 00:56:59 +0800816 sdmac->desc->chn_real_count += bd->mode.count;
Sascha Hauer1ec1e822010-09-30 13:56:34 +0000817 }
818
819 if (error)
820 sdmac->status = DMA_ERROR;
821 else
Vinod Koul409bff62013-10-16 14:07:06 +0530822 sdmac->status = DMA_COMPLETE;
Sascha Hauer1ec1e822010-09-30 13:56:34 +0000823}
824
Sascha Hauer1ec1e822010-09-30 13:56:34 +0000825static irqreturn_t sdma_int_handler(int irq, void *dev_id)
826{
827 struct sdma_engine *sdma = dev_id;
Richard Zhao0bbc1412012-01-13 11:10:01 +0800828 unsigned long stat;
Sascha Hauer1ec1e822010-09-30 13:56:34 +0000829
Richard Zhaoc4b56852012-01-13 11:09:57 +0800830 stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
831 writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
Michael Olbrich1d069bf2016-07-07 11:35:51 +0200832 /* channel 0 is special and not handled here, see run_channel0() */
833 stat &= ~1;
Sascha Hauer1ec1e822010-09-30 13:56:34 +0000834
835 while (stat) {
836 int channel = fls(stat) - 1;
837 struct sdma_channel *sdmac = &sdma->channel[channel];
Robin Gong57b772b2018-06-20 00:57:00 +0800838 struct sdma_desc *desc;
Sascha Hauer1ec1e822010-09-30 13:56:34 +0000839
Robin Gong57b772b2018-06-20 00:57:00 +0800840 spin_lock(&sdmac->vc.lock);
841 desc = sdmac->desc;
842 if (desc) {
843 if (sdmac->flags & IMX_DMA_SG_LOOP) {
844 sdma_update_channel_loop(sdmac);
845 } else {
846 mxc_sdma_handle_channel_normal(sdmac);
847 vchan_cookie_complete(&desc->vd);
848 sdma_start_desc(sdmac);
849 }
850 }
Sascha Hauer1ec1e822010-09-30 13:56:34 +0000851
Robin Gong57b772b2018-06-20 00:57:00 +0800852 spin_unlock(&sdmac->vc.lock);
Richard Zhao0bbc1412012-01-13 11:10:01 +0800853 __clear_bit(channel, &stat);
Sascha Hauer1ec1e822010-09-30 13:56:34 +0000854 }
855
856 return IRQ_HANDLED;
857}
858
859/*
860 * sets the pc of SDMA script according to the peripheral type
861 */
862static void sdma_get_pc(struct sdma_channel *sdmac,
863 enum sdma_peripheral_type peripheral_type)
864{
865 struct sdma_engine *sdma = sdmac->sdma;
866 int per_2_emi = 0, emi_2_per = 0;
867 /*
868 * These are needed once we start to support transfers between
869 * two peripherals or memory-to-memory transfers
870 */
Vinod Koul0d605ba2016-07-08 10:43:27 +0530871 int per_2_per = 0;
Sascha Hauer1ec1e822010-09-30 13:56:34 +0000872
873 sdmac->pc_from_device = 0;
874 sdmac->pc_to_device = 0;
Shengjiu Wang8391ecf2015-07-10 17:08:16 +0800875 sdmac->device_to_device = 0;
Sascha Hauer1ec1e822010-09-30 13:56:34 +0000876
877 switch (peripheral_type) {
878 case IMX_DMATYPE_MEMORY:
Sascha Hauer1ec1e822010-09-30 13:56:34 +0000879 break;
880 case IMX_DMATYPE_DSP:
881 emi_2_per = sdma->script_addrs->bp_2_ap_addr;
882 per_2_emi = sdma->script_addrs->ap_2_bp_addr;
883 break;
884 case IMX_DMATYPE_FIRI:
885 per_2_emi = sdma->script_addrs->firi_2_mcu_addr;
886 emi_2_per = sdma->script_addrs->mcu_2_firi_addr;
887 break;
888 case IMX_DMATYPE_UART:
889 per_2_emi = sdma->script_addrs->uart_2_mcu_addr;
890 emi_2_per = sdma->script_addrs->mcu_2_app_addr;
891 break;
892 case IMX_DMATYPE_UART_SP:
893 per_2_emi = sdma->script_addrs->uartsh_2_mcu_addr;
894 emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
895 break;
896 case IMX_DMATYPE_ATA:
897 per_2_emi = sdma->script_addrs->ata_2_mcu_addr;
898 emi_2_per = sdma->script_addrs->mcu_2_ata_addr;
899 break;
900 case IMX_DMATYPE_CSPI:
901 case IMX_DMATYPE_EXT:
902 case IMX_DMATYPE_SSI:
Nicolin Chen29aebfd2014-10-24 12:37:41 -0700903 case IMX_DMATYPE_SAI:
Sascha Hauer1ec1e822010-09-30 13:56:34 +0000904 per_2_emi = sdma->script_addrs->app_2_mcu_addr;
905 emi_2_per = sdma->script_addrs->mcu_2_app_addr;
906 break;
Nicolin Chen1a895572013-11-13 22:55:25 +0800907 case IMX_DMATYPE_SSI_DUAL:
908 per_2_emi = sdma->script_addrs->ssish_2_mcu_addr;
909 emi_2_per = sdma->script_addrs->mcu_2_ssish_addr;
910 break;
Sascha Hauer1ec1e822010-09-30 13:56:34 +0000911 case IMX_DMATYPE_SSI_SP:
912 case IMX_DMATYPE_MMC:
913 case IMX_DMATYPE_SDHC:
914 case IMX_DMATYPE_CSPI_SP:
915 case IMX_DMATYPE_ESAI:
916 case IMX_DMATYPE_MSHC_SP:
917 per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
918 emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
919 break;
920 case IMX_DMATYPE_ASRC:
921 per_2_emi = sdma->script_addrs->asrc_2_mcu_addr;
922 emi_2_per = sdma->script_addrs->asrc_2_mcu_addr;
923 per_2_per = sdma->script_addrs->per_2_per_addr;
924 break;
Nicolin Chenf892afb2014-06-16 11:31:05 +0800925 case IMX_DMATYPE_ASRC_SP:
926 per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
927 emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
928 per_2_per = sdma->script_addrs->per_2_per_addr;
929 break;
Sascha Hauer1ec1e822010-09-30 13:56:34 +0000930 case IMX_DMATYPE_MSHC:
931 per_2_emi = sdma->script_addrs->mshc_2_mcu_addr;
932 emi_2_per = sdma->script_addrs->mcu_2_mshc_addr;
933 break;
934 case IMX_DMATYPE_CCM:
935 per_2_emi = sdma->script_addrs->dptc_dvfs_addr;
936 break;
937 case IMX_DMATYPE_SPDIF:
938 per_2_emi = sdma->script_addrs->spdif_2_mcu_addr;
939 emi_2_per = sdma->script_addrs->mcu_2_spdif_addr;
940 break;
941 case IMX_DMATYPE_IPU_MEMORY:
942 emi_2_per = sdma->script_addrs->ext_mem_2_ipu_addr;
943 break;
944 default:
945 break;
946 }
947
948 sdmac->pc_from_device = per_2_emi;
949 sdmac->pc_to_device = emi_2_per;
Shengjiu Wang8391ecf2015-07-10 17:08:16 +0800950 sdmac->device_to_device = per_2_per;
Sascha Hauer1ec1e822010-09-30 13:56:34 +0000951}
952
/*
 * Load the channel context (script PC, event masks, peripheral address
 * and watermark level) into the SDMA engine's context RAM.
 *
 * The context is transferred through the shared channel-0 buffer
 * descriptor with a C0_SETDM command, serialized by channel_0_lock.
 *
 * Returns 0 on success, a negative script address if no script is
 * available for the current direction, or the error from
 * sdma_run_channel0().
 */
static int sdma_load_context(struct sdma_channel *sdmac)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;
	int load_address;
	struct sdma_context_data *context = sdma->context;
	struct sdma_buffer_descriptor *bd0 = sdma->bd0;
	int ret;
	unsigned long flags;

	if (sdmac->direction == DMA_DEV_TO_MEM)
		load_address = sdmac->pc_from_device;
	else if (sdmac->direction == DMA_DEV_TO_DEV)
		load_address = sdmac->device_to_device;
	else
		load_address = sdmac->pc_to_device;

	if (load_address < 0)
		return load_address;

	dev_dbg(sdma->dev, "load_address = %d\n", load_address);
	dev_dbg(sdma->dev, "wml = 0x%08x\n", (u32)sdmac->watermark_level);
	dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr);
	dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr);
	dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", (u32)sdmac->event_mask[0]);
	dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", (u32)sdmac->event_mask[1]);

	spin_lock_irqsave(&sdma->channel_0_lock, flags);

	memset(context, 0, sizeof(*context));
	context->channel_state.pc = load_address;

	/* Send by context the event mask,base address for peripheral
	 * and watermark level
	 */
	context->gReg[0] = sdmac->event_mask[1];
	context->gReg[1] = sdmac->event_mask[0];
	context->gReg[2] = sdmac->per_addr;
	context->gReg[6] = sdmac->shp_addr;
	context->gReg[7] = sdmac->watermark_level;

	bd0->mode.command = C0_SETDM;
	bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
	bd0->mode.count = sizeof(*context) / 4;
	bd0->buffer_addr = sdma->context_phys;
	/* context RAM destination: one context-sized slot per channel */
	bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
	ret = sdma_run_channel0(sdma);

	spin_unlock_irqrestore(&sdma->channel_0_lock, flags);

	return ret;
}
1005
Maxime Ripard7b350ab2014-11-17 14:42:17 +01001006static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001007{
Robin Gong57b772b2018-06-20 00:57:00 +08001008 return container_of(chan, struct sdma_channel, vc.chan);
Maxime Ripard7b350ab2014-11-17 14:42:17 +01001009}
1010
1011static int sdma_disable_channel(struct dma_chan *chan)
1012{
1013 struct sdma_channel *sdmac = to_sdma_chan(chan);
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001014 struct sdma_engine *sdma = sdmac->sdma;
1015 int channel = sdmac->channel;
1016
Richard Zhao0bbc1412012-01-13 11:10:01 +08001017 writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP);
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001018 sdmac->status = DMA_ERROR;
Maxime Ripard7b350ab2014-11-17 14:42:17 +01001019
1020 return 0;
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001021}
1022
Jiada Wang7f3ff142017-03-16 23:12:09 -07001023static int sdma_disable_channel_with_delay(struct dma_chan *chan)
1024{
Robin Gong57b772b2018-06-20 00:57:00 +08001025 struct sdma_channel *sdmac = to_sdma_chan(chan);
1026 unsigned long flags;
1027 LIST_HEAD(head);
1028
Jiada Wang7f3ff142017-03-16 23:12:09 -07001029 sdma_disable_channel(chan);
Robin Gong57b772b2018-06-20 00:57:00 +08001030 spin_lock_irqsave(&sdmac->vc.lock, flags);
1031 vchan_get_all_descriptors(&sdmac->vc, &head);
1032 sdmac->desc = NULL;
1033 spin_unlock_irqrestore(&sdmac->vc.lock, flags);
1034 vchan_dma_desc_free_list(&sdmac->vc, &head);
Jiada Wang7f3ff142017-03-16 23:12:09 -07001035
1036 /*
1037 * According to NXP R&D team a delay of one BD SDMA cost time
1038 * (maximum is 1ms) should be added after disable of the channel
1039 * bit, to ensure SDMA core has really been stopped after SDMA
1040 * clients call .device_terminate_all.
1041 */
1042 mdelay(1);
1043
1044 return 0;
1045}
1046
Shengjiu Wang8391ecf2015-07-10 17:08:16 +08001047static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
1048{
1049 struct sdma_engine *sdma = sdmac->sdma;
1050
1051 int lwml = sdmac->watermark_level & SDMA_WATERMARK_LEVEL_LWML;
1052 int hwml = (sdmac->watermark_level & SDMA_WATERMARK_LEVEL_HWML) >> 16;
1053
1054 set_bit(sdmac->event_id0 % 32, &sdmac->event_mask[1]);
1055 set_bit(sdmac->event_id1 % 32, &sdmac->event_mask[0]);
1056
1057 if (sdmac->event_id0 > 31)
1058 sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_LWE;
1059
1060 if (sdmac->event_id1 > 31)
1061 sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_HWE;
1062
1063 /*
1064 * If LWML(src_maxburst) > HWML(dst_maxburst), we need
1065 * swap LWML and HWML of INFO(A.3.2.5.1), also need swap
1066 * r0(event_mask[1]) and r1(event_mask[0]).
1067 */
1068 if (lwml > hwml) {
1069 sdmac->watermark_level &= ~(SDMA_WATERMARK_LEVEL_LWML |
1070 SDMA_WATERMARK_LEVEL_HWML);
1071 sdmac->watermark_level |= hwml;
1072 sdmac->watermark_level |= lwml << 16;
1073 swap(sdmac->event_mask[0], sdmac->event_mask[1]);
1074 }
1075
1076 if (sdmac->per_address2 >= sdma->spba_start_addr &&
1077 sdmac->per_address2 <= sdma->spba_end_addr)
1078 sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_SP;
1079
1080 if (sdmac->per_address >= sdma->spba_start_addr &&
1081 sdmac->per_address <= sdma->spba_end_addr)
1082 sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_DP;
1083
1084 sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_CONT;
1085}
1086
Maxime Ripard7b350ab2014-11-17 14:42:17 +01001087static int sdma_config_channel(struct dma_chan *chan)
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001088{
Maxime Ripard7b350ab2014-11-17 14:42:17 +01001089 struct sdma_channel *sdmac = to_sdma_chan(chan);
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001090 int ret;
1091
Maxime Ripard7b350ab2014-11-17 14:42:17 +01001092 sdma_disable_channel(chan);
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001093
Richard Zhao0bbc1412012-01-13 11:10:01 +08001094 sdmac->event_mask[0] = 0;
1095 sdmac->event_mask[1] = 0;
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001096 sdmac->shp_addr = 0;
1097 sdmac->per_addr = 0;
1098
1099 if (sdmac->event_id0) {
Sascha Hauer17bba722013-08-20 10:04:31 +02001100 if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001101 return -EINVAL;
1102 sdma_event_enable(sdmac, sdmac->event_id0);
1103 }
1104
Shengjiu Wang8391ecf2015-07-10 17:08:16 +08001105 if (sdmac->event_id1) {
1106 if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events)
1107 return -EINVAL;
1108 sdma_event_enable(sdmac, sdmac->event_id1);
1109 }
1110
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001111 switch (sdmac->peripheral_type) {
1112 case IMX_DMATYPE_DSP:
1113 sdma_config_ownership(sdmac, false, true, true);
1114 break;
1115 case IMX_DMATYPE_MEMORY:
1116 sdma_config_ownership(sdmac, false, true, false);
1117 break;
1118 default:
1119 sdma_config_ownership(sdmac, true, true, false);
1120 break;
1121 }
1122
1123 sdma_get_pc(sdmac, sdmac->peripheral_type);
1124
1125 if ((sdmac->peripheral_type != IMX_DMATYPE_MEMORY) &&
1126 (sdmac->peripheral_type != IMX_DMATYPE_DSP)) {
1127 /* Handle multiple event channels differently */
1128 if (sdmac->event_id1) {
Shengjiu Wang8391ecf2015-07-10 17:08:16 +08001129 if (sdmac->peripheral_type == IMX_DMATYPE_ASRC_SP ||
1130 sdmac->peripheral_type == IMX_DMATYPE_ASRC)
1131 sdma_set_watermarklevel_for_p2p(sdmac);
1132 } else
Richard Zhao0bbc1412012-01-13 11:10:01 +08001133 __set_bit(sdmac->event_id0, sdmac->event_mask);
Shengjiu Wang8391ecf2015-07-10 17:08:16 +08001134
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001135 /* Address */
1136 sdmac->shp_addr = sdmac->per_address;
Shengjiu Wang8391ecf2015-07-10 17:08:16 +08001137 sdmac->per_addr = sdmac->per_address2;
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001138 } else {
1139 sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */
1140 }
1141
1142 ret = sdma_load_context(sdmac);
1143
1144 return ret;
1145}
1146
1147static int sdma_set_channel_priority(struct sdma_channel *sdmac,
1148 unsigned int priority)
1149{
1150 struct sdma_engine *sdma = sdmac->sdma;
1151 int channel = sdmac->channel;
1152
1153 if (priority < MXC_SDMA_MIN_PRIORITY
1154 || priority > MXC_SDMA_MAX_PRIORITY) {
1155 return -EINVAL;
1156 }
1157
Richard Zhaoc4b56852012-01-13 11:09:57 +08001158 writel_relaxed(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel);
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001159
1160 return 0;
1161}
1162
Robin Gong57b772b2018-06-20 00:57:00 +08001163static int sdma_request_channel0(struct sdma_engine *sdma)
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001164{
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001165 int ret = -EBUSY;
1166
Robin Gong57b772b2018-06-20 00:57:00 +08001167 sdma->bd0 = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys,
1168 GFP_NOWAIT);
1169 if (!sdma->bd0) {
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001170 ret = -ENOMEM;
1171 goto out;
1172 }
1173
Robin Gong57b772b2018-06-20 00:57:00 +08001174 sdma->channel_control[0].base_bd_ptr = sdma->bd0_phys;
1175 sdma->channel_control[0].current_bd_ptr = sdma->bd0_phys;
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001176
Robin Gong57b772b2018-06-20 00:57:00 +08001177 sdma_set_channel_priority(&sdma->channel[0], MXC_SDMA_DEFAULT_PRIORITY);
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001178 return 0;
1179out:
1180
1181 return ret;
1182}
1183
Robin Gong57b772b2018-06-20 00:57:00 +08001184
1185static int sdma_alloc_bd(struct sdma_desc *desc)
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001186{
Robin Gong57b772b2018-06-20 00:57:00 +08001187 int ret = 0;
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001188
Vinod Koulc1199872018-07-02 18:37:27 +05301189 desc->bd = dma_pool_alloc(desc->sdmac->bd_pool, GFP_NOWAIT,
1190 &desc->bd_phys);
Robin Gong57b772b2018-06-20 00:57:00 +08001191 if (!desc->bd) {
1192 ret = -ENOMEM;
1193 goto out;
1194 }
1195out:
1196 return ret;
1197}
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001198
Robin Gong57b772b2018-06-20 00:57:00 +08001199static void sdma_free_bd(struct sdma_desc *desc)
1200{
Robin Gongfe5b85c2018-06-20 00:57:04 +08001201 dma_pool_free(desc->sdmac->bd_pool, desc->bd, desc->bd_phys);
Robin Gong57b772b2018-06-20 00:57:00 +08001202}
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001203
Robin Gong57b772b2018-06-20 00:57:00 +08001204static void sdma_desc_free(struct virt_dma_desc *vd)
1205{
1206 struct sdma_desc *desc = container_of(vd, struct sdma_desc, vd);
1207
1208 sdma_free_bd(desc);
1209 kfree(desc);
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001210}
1211
1212static int sdma_alloc_chan_resources(struct dma_chan *chan)
1213{
1214 struct sdma_channel *sdmac = to_sdma_chan(chan);
1215 struct imx_dma_data *data = chan->private;
1216 int prio, ret;
1217
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001218 if (!data)
1219 return -EINVAL;
1220
1221 switch (data->priority) {
1222 case DMA_PRIO_HIGH:
1223 prio = 3;
1224 break;
1225 case DMA_PRIO_MEDIUM:
1226 prio = 2;
1227 break;
1228 case DMA_PRIO_LOW:
1229 default:
1230 prio = 1;
1231 break;
1232 }
1233
1234 sdmac->peripheral_type = data->peripheral_type;
1235 sdmac->event_id0 = data->dma_request;
Shengjiu Wang8391ecf2015-07-10 17:08:16 +08001236 sdmac->event_id1 = data->dma_request2;
Richard Zhaoc2c744d2012-01-13 11:09:59 +08001237
Fabio Estevamb93edcd2015-07-29 21:03:49 -03001238 ret = clk_enable(sdmac->sdma->clk_ipg);
1239 if (ret)
1240 return ret;
1241 ret = clk_enable(sdmac->sdma->clk_ahb);
1242 if (ret)
1243 goto disable_clk_ipg;
Richard Zhaoc2c744d2012-01-13 11:09:59 +08001244
Richard Zhao3bb5e7c2012-01-13 11:09:58 +08001245 ret = sdma_set_channel_priority(sdmac, prio);
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001246 if (ret)
Fabio Estevamb93edcd2015-07-29 21:03:49 -03001247 goto disable_clk_ahb;
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001248
Robin Gongfe5b85c2018-06-20 00:57:04 +08001249 sdmac->bd_pool = dma_pool_create("bd_pool", chan->device->dev,
1250 sizeof(struct sdma_buffer_descriptor),
1251 32, 0);
1252
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001253 return 0;
Fabio Estevamb93edcd2015-07-29 21:03:49 -03001254
1255disable_clk_ahb:
1256 clk_disable(sdmac->sdma->clk_ahb);
1257disable_clk_ipg:
1258 clk_disable(sdmac->sdma->clk_ipg);
1259 return ret;
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001260}
1261
/*
 * .device_free_chan_resources: release everything acquired in
 * sdma_alloc_chan_resources() — terminate the channel, disable its
 * DMA request events, drop its priority, gate the clocks and destroy
 * the buffer descriptor pool.
 */
static void sdma_free_chan_resources(struct dma_chan *chan)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct sdma_engine *sdma = sdmac->sdma;

	sdma_disable_channel_with_delay(chan);

	if (sdmac->event_id0)
		sdma_event_disable(sdmac, sdmac->event_id0);
	if (sdmac->event_id1)
		sdma_event_disable(sdmac, sdmac->event_id1);

	sdmac->event_id0 = 0;
	sdmac->event_id1 = 0;

	/* priority 0 effectively parks the channel */
	sdma_set_channel_priority(sdmac, 0);

	clk_disable(sdma->clk_ipg);
	clk_disable(sdma->clk_ahb);

	dma_pool_destroy(sdmac->bd_pool);
	sdmac->bd_pool = NULL;
}
1285
Robin Gong21420842018-06-20 00:57:03 +08001286static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
1287 enum dma_transfer_direction direction, u32 bds)
1288{
1289 struct sdma_desc *desc;
1290
1291 desc = kzalloc((sizeof(*desc)), GFP_NOWAIT);
1292 if (!desc)
1293 goto err_out;
1294
1295 sdmac->status = DMA_IN_PROGRESS;
1296 sdmac->direction = direction;
1297 sdmac->flags = 0;
1298
1299 desc->chn_count = 0;
1300 desc->chn_real_count = 0;
1301 desc->buf_tail = 0;
1302 desc->buf_ptail = 0;
1303 desc->sdmac = sdmac;
1304 desc->num_bd = bds;
1305
1306 if (sdma_alloc_bd(desc))
1307 goto err_desc_out;
1308
1309 if (sdma_load_context(sdmac))
1310 goto err_desc_out;
1311
1312 return desc;
1313
1314err_desc_out:
1315 kfree(desc);
1316err_out:
1317 return NULL;
1318}
1319
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001320static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
1321 struct dma_chan *chan, struct scatterlist *sgl,
Vinod Kouldb8196d2011-10-13 22:34:23 +05301322 unsigned int sg_len, enum dma_transfer_direction direction,
Alexandre Bounine185ecb52012-03-08 15:35:13 -05001323 unsigned long flags, void *context)
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001324{
1325 struct sdma_channel *sdmac = to_sdma_chan(chan);
1326 struct sdma_engine *sdma = sdmac->sdma;
Vinod Koulad78b002018-07-02 18:42:51 +05301327 int i, count;
Sascha Hauer23889c62011-01-31 10:56:58 +01001328 int channel = sdmac->channel;
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001329 struct scatterlist *sg;
Robin Gong57b772b2018-06-20 00:57:00 +08001330 struct sdma_desc *desc;
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001331
Robin Gong21420842018-06-20 00:57:03 +08001332 desc = sdma_transfer_init(sdmac, direction, sg_len);
Robin Gong57b772b2018-06-20 00:57:00 +08001333 if (!desc)
1334 goto err_out;
1335
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001336 dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
1337 sg_len, channel);
1338
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001339 for_each_sg(sgl, sg, sg_len, i) {
Sascha Hauer76c33d22018-06-20 00:56:59 +08001340 struct sdma_buffer_descriptor *bd = &desc->bd[i];
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001341 int param;
1342
Anatolij Gustschind2f5c272010-11-22 18:35:18 +01001343 bd->buffer_addr = sg->dma_address;
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001344
Lars-Peter Clausenfdaf9c42012-04-25 20:50:52 +02001345 count = sg_dma_len(sg);
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001346
1347 if (count > 0xffff) {
1348 dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
1349 channel, count, 0xffff);
Robin Gong57b772b2018-06-20 00:57:00 +08001350 goto err_bd_out;
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001351 }
1352
1353 bd->mode.count = count;
Sascha Hauer76c33d22018-06-20 00:56:59 +08001354 desc->chn_count += count;
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001355
Vinod Koulad78b002018-07-02 18:42:51 +05301356 if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
Robin Gong57b772b2018-06-20 00:57:00 +08001357 goto err_bd_out;
Sascha Hauer1fa81c22011-01-12 13:02:28 +01001358
1359 switch (sdmac->word_size) {
1360 case DMA_SLAVE_BUSWIDTH_4_BYTES:
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001361 bd->mode.command = 0;
Sascha Hauer1fa81c22011-01-12 13:02:28 +01001362 if (count & 3 || sg->dma_address & 3)
Robin Gong57b772b2018-06-20 00:57:00 +08001363 goto err_bd_out;
Sascha Hauer1fa81c22011-01-12 13:02:28 +01001364 break;
1365 case DMA_SLAVE_BUSWIDTH_2_BYTES:
1366 bd->mode.command = 2;
1367 if (count & 1 || sg->dma_address & 1)
Robin Gong57b772b2018-06-20 00:57:00 +08001368 goto err_bd_out;
Sascha Hauer1fa81c22011-01-12 13:02:28 +01001369 break;
1370 case DMA_SLAVE_BUSWIDTH_1_BYTE:
1371 bd->mode.command = 1;
1372 break;
1373 default:
Robin Gong57b772b2018-06-20 00:57:00 +08001374 goto err_bd_out;
Sascha Hauer1fa81c22011-01-12 13:02:28 +01001375 }
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001376
1377 param = BD_DONE | BD_EXTD | BD_CONT;
1378
Shawn Guo341b9412011-01-20 05:50:39 +08001379 if (i + 1 == sg_len) {
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001380 param |= BD_INTR;
Shawn Guo341b9412011-01-20 05:50:39 +08001381 param |= BD_LAST;
1382 param &= ~BD_CONT;
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001383 }
1384
Olof Johanssonc3cc74b2013-11-12 22:30:44 -08001385 dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n",
1386 i, count, (u64)sg->dma_address,
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001387 param & BD_WRAP ? "wrap" : "",
1388 param & BD_INTR ? " intr" : "");
1389
1390 bd->mode.status = param;
1391 }
1392
Robin Gong57b772b2018-06-20 00:57:00 +08001393 return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
1394err_bd_out:
1395 sdma_free_bd(desc);
1396 kfree(desc);
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001397err_out:
Shawn Guo4b2ce9d2011-01-20 05:50:36 +08001398 sdmac->status = DMA_ERROR;
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001399 return NULL;
1400}
1401
/*
 * .device_prep_dma_cyclic: build a cyclic transfer of buf_len bytes
 * split into periods of period_len, one buffer descriptor per period.
 * The last descriptor gets BD_WRAP so the engine loops forever; every
 * period raises an interrupt (BD_INTR) so the loop handler can re-arm
 * the descriptors.
 *
 * Returns the prepared async descriptor, or NULL on error (in which
 * case the channel status is set to DMA_ERROR).
 */
static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct sdma_engine *sdma = sdmac->sdma;
	int num_periods = buf_len / period_len;
	int channel = sdmac->channel;
	int i = 0, buf = 0;
	struct sdma_desc *desc;

	dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);

	desc = sdma_transfer_init(sdmac, direction, num_periods);
	if (!desc)
		goto err_out;

	desc->period_len = period_len;

	sdmac->flags |= IMX_DMA_SG_LOOP;

	/* BD count field is 16 bits wide */
	if (period_len > 0xffff) {
		dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %zu > %d\n",
				channel, period_len, 0xffff);
		goto err_bd_out;
	}

	while (buf < buf_len) {
		struct sdma_buffer_descriptor *bd = &desc->bd[i];
		int param;

		bd->buffer_addr = dma_addr;

		bd->mode.count = period_len;

		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
			goto err_bd_out;
		if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
			bd->mode.command = 0;
		else
			bd->mode.command = sdmac->word_size;

		param = BD_DONE | BD_EXTD | BD_CONT | BD_INTR;
		/* close the ring on the last period */
		if (i + 1 == num_periods)
			param |= BD_WRAP;

		dev_dbg(sdma->dev, "entry %d: count: %zu dma: %#llx %s%s\n",
				i, period_len, (u64)dma_addr,
				param & BD_WRAP ? "wrap" : "",
				param & BD_INTR ? " intr" : "");

		bd->mode.status = param;

		dma_addr += period_len;
		buf += period_len;

		i++;
	}

	return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
err_bd_out:
	sdma_free_bd(desc);
	kfree(desc);
err_out:
	sdmac->status = DMA_ERROR;
	return NULL;
}
1470
Maxime Ripard7b350ab2014-11-17 14:42:17 +01001471static int sdma_config(struct dma_chan *chan,
1472 struct dma_slave_config *dmaengine_cfg)
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001473{
1474 struct sdma_channel *sdmac = to_sdma_chan(chan);
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001475
Maxime Ripard7b350ab2014-11-17 14:42:17 +01001476 if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
1477 sdmac->per_address = dmaengine_cfg->src_addr;
1478 sdmac->watermark_level = dmaengine_cfg->src_maxburst *
1479 dmaengine_cfg->src_addr_width;
1480 sdmac->word_size = dmaengine_cfg->src_addr_width;
Shengjiu Wang8391ecf2015-07-10 17:08:16 +08001481 } else if (dmaengine_cfg->direction == DMA_DEV_TO_DEV) {
1482 sdmac->per_address2 = dmaengine_cfg->src_addr;
1483 sdmac->per_address = dmaengine_cfg->dst_addr;
1484 sdmac->watermark_level = dmaengine_cfg->src_maxburst &
1485 SDMA_WATERMARK_LEVEL_LWML;
1486 sdmac->watermark_level |= (dmaengine_cfg->dst_maxburst << 16) &
1487 SDMA_WATERMARK_LEVEL_HWML;
1488 sdmac->word_size = dmaengine_cfg->dst_addr_width;
Maxime Ripard7b350ab2014-11-17 14:42:17 +01001489 } else {
1490 sdmac->per_address = dmaengine_cfg->dst_addr;
1491 sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
1492 dmaengine_cfg->dst_addr_width;
1493 sdmac->word_size = dmaengine_cfg->dst_addr_width;
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001494 }
Maxime Ripard7b350ab2014-11-17 14:42:17 +01001495 sdmac->direction = dmaengine_cfg->direction;
1496 return sdma_config_channel(chan);
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001497}
1498
/*
 * .device_tx_status: report transfer status and residue for a cookie.
 * The residue is derived from the buffer descriptor bookkeeping: for
 * cyclic transfers from the remaining periods, otherwise from the
 * difference between requested and actually transferred byte counts.
 */
static enum dma_status sdma_tx_status(struct dma_chan *chan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct sdma_desc *desc;
	u32 residue;
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&sdmac->vc.lock, flags);
	vd = vchan_find_desc(&sdmac->vc, cookie);
	if (vd) {
		/* cookie is still queued, not yet started */
		desc = to_sdma_desc(&vd->tx);
		if (sdmac->flags & IMX_DMA_SG_LOOP)
			residue = (desc->num_bd - desc->buf_ptail) *
				desc->period_len - desc->chn_real_count;
		else
			residue = desc->chn_count - desc->chn_real_count;
	} else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie) {
		/* cookie is the transfer currently in flight */
		residue = sdmac->desc->chn_count - sdmac->desc->chn_real_count;
	} else {
		residue = 0;
	}
	spin_unlock_irqrestore(&sdmac->vc.lock, flags);

	dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
			 residue);

	return sdmac->status;
}
1535
1536static void sdma_issue_pending(struct dma_chan *chan)
1537{
Sascha Hauer2b4f1302012-01-09 10:32:50 +01001538 struct sdma_channel *sdmac = to_sdma_chan(chan);
Robin Gong57b772b2018-06-20 00:57:00 +08001539 unsigned long flags;
Sascha Hauer2b4f1302012-01-09 10:32:50 +01001540
Robin Gong57b772b2018-06-20 00:57:00 +08001541 spin_lock_irqsave(&sdmac->vc.lock, flags);
1542 if (vchan_issue_pending(&sdmac->vc) && !sdmac->desc)
1543 sdma_start_desc(sdmac);
1544 spin_unlock_irqrestore(&sdmac->vc.lock, flags);
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001545}
1546
Sascha Hauer5b28aa32010-10-06 15:41:15 +02001547#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34
Nicolin Chencd72b842013-11-13 22:55:24 +08001548#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2 38
Fabio Estevama5724602015-03-11 12:30:58 -03001549#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3 41
Fabio Estevamb7d26482016-08-10 13:05:05 -03001550#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4 42
Sascha Hauer5b28aa32010-10-06 15:41:15 +02001551
1552static void sdma_add_scripts(struct sdma_engine *sdma,
1553 const struct sdma_script_start_addrs *addr)
1554{
1555 s32 *addr_arr = (u32 *)addr;
1556 s32 *saddr_arr = (u32 *)sdma->script_addrs;
1557 int i;
1558
Nicolin Chen70dabaed2014-01-08 16:45:56 +08001559 /* use the default firmware in ROM if missing external firmware */
1560 if (!sdma->script_number)
1561 sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
1562
Nicolin Chencd72b842013-11-13 22:55:24 +08001563 for (i = 0; i < sdma->script_number; i++)
Sascha Hauer5b28aa32010-10-06 15:41:15 +02001564 if (addr_arr[i] > 0)
1565 saddr_arr[i] = addr_arr[i];
1566}
1567
/*
 * Completion callback for the asynchronous firmware request issued by
 * sdma_get_firmware(): validate the image, download its RAM code into
 * the SDMA program memory and register the script entry points it
 * exports.  On any validation failure the image is simply released and
 * the ROM scripts remain in use.
 */
static void sdma_load_firmware(const struct firmware *fw, void *context)
{
	struct sdma_engine *sdma = context;
	const struct sdma_firmware_header *header;
	const struct sdma_script_start_addrs *addr;
	unsigned short *ram_code;

	if (!fw) {
		dev_info(sdma->dev, "external firmware not found, using ROM firmware\n");
		/* In this case we just use the ROM firmware. */
		return;
	}

	/* reject images too small to even hold the header */
	if (fw->size < sizeof(*header))
		goto err_firmware;

	header = (struct sdma_firmware_header *)fw->data;

	if (header->magic != SDMA_FIRMWARE_MAGIC)
		goto err_firmware;
	/* the RAM code section must lie entirely within the image */
	if (header->ram_code_start + header->ram_code_size > fw->size)
		goto err_firmware;
	/* header major version selects how many script slots the image has */
	switch (header->version_major) {
	case 1:
		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
		break;
	case 2:
		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2;
		break;
	case 3:
		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3;
		break;
	case 4:
		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4;
		break;
	default:
		dev_err(sdma->dev, "unknown firmware version\n");
		goto err_firmware;
	}

	/* both offsets were bounds-checked against fw->size above */
	addr = (void *)header + header->script_addrs_start;
	ram_code = (void *)header + header->ram_code_start;

	/* clocks must be running while the SDMA program memory is written */
	clk_enable(sdma->clk_ipg);
	clk_enable(sdma->clk_ahb);
	/* download the RAM image for SDMA */
	sdma_load_script(sdma, ram_code,
			 header->ram_code_size,
			 addr->ram_code_start_addr);
	clk_disable(sdma->clk_ipg);
	clk_disable(sdma->clk_ahb);

	sdma_add_scripts(sdma, addr);

	dev_info(sdma->dev, "loaded firmware %d.%d\n",
		 header->version_major,
		 header->version_minor);

err_firmware:
	release_firmware(fw);
}
1629
Zidan Wangd078cd12015-07-23 11:40:49 +08001630#define EVENT_REMAP_CELLS 3
1631
Jason Liu29f493d2015-11-11 17:20:49 +08001632static int sdma_event_remap(struct sdma_engine *sdma)
Zidan Wangd078cd12015-07-23 11:40:49 +08001633{
1634 struct device_node *np = sdma->dev->of_node;
1635 struct device_node *gpr_np = of_parse_phandle(np, "gpr", 0);
1636 struct property *event_remap;
1637 struct regmap *gpr;
1638 char propname[] = "fsl,sdma-event-remap";
1639 u32 reg, val, shift, num_map, i;
1640 int ret = 0;
1641
1642 if (IS_ERR(np) || IS_ERR(gpr_np))
1643 goto out;
1644
1645 event_remap = of_find_property(np, propname, NULL);
1646 num_map = event_remap ? (event_remap->length / sizeof(u32)) : 0;
1647 if (!num_map) {
Fabio Estevamce078af2015-10-03 19:37:58 -03001648 dev_dbg(sdma->dev, "no event needs to be remapped\n");
Zidan Wangd078cd12015-07-23 11:40:49 +08001649 goto out;
1650 } else if (num_map % EVENT_REMAP_CELLS) {
1651 dev_err(sdma->dev, "the property %s must modulo %d\n",
1652 propname, EVENT_REMAP_CELLS);
1653 ret = -EINVAL;
1654 goto out;
1655 }
1656
1657 gpr = syscon_node_to_regmap(gpr_np);
1658 if (IS_ERR(gpr)) {
1659 dev_err(sdma->dev, "failed to get gpr regmap\n");
1660 ret = PTR_ERR(gpr);
1661 goto out;
1662 }
1663
1664 for (i = 0; i < num_map; i += EVENT_REMAP_CELLS) {
1665 ret = of_property_read_u32_index(np, propname, i, &reg);
1666 if (ret) {
1667 dev_err(sdma->dev, "failed to read property %s index %d\n",
1668 propname, i);
1669 goto out;
1670 }
1671
1672 ret = of_property_read_u32_index(np, propname, i + 1, &shift);
1673 if (ret) {
1674 dev_err(sdma->dev, "failed to read property %s index %d\n",
1675 propname, i + 1);
1676 goto out;
1677 }
1678
1679 ret = of_property_read_u32_index(np, propname, i + 2, &val);
1680 if (ret) {
1681 dev_err(sdma->dev, "failed to read property %s index %d\n",
1682 propname, i + 2);
1683 goto out;
1684 }
1685
1686 regmap_update_bits(gpr, reg, BIT(shift), val << shift);
1687 }
1688
1689out:
1690 if (!IS_ERR(gpr_np))
1691 of_node_put(gpr_np);
1692
1693 return ret;
1694}
1695
Arnd Bergmannfe6cf282014-09-26 23:24:00 +02001696static int sdma_get_firmware(struct sdma_engine *sdma,
Sascha Hauer7b4b88e2011-08-25 11:03:37 +02001697 const char *fw_name)
1698{
1699 int ret;
1700
1701 ret = request_firmware_nowait(THIS_MODULE,
1702 FW_ACTION_HOTPLUG, fw_name, sdma->dev,
1703 GFP_KERNEL, sdma, sdma_load_firmware);
Sascha Hauer5b28aa32010-10-06 15:41:15 +02001704
1705 return ret;
1706}
1707
Jingoo Han19bfc772014-11-06 10:10:09 +09001708static int sdma_init(struct sdma_engine *sdma)
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001709{
1710 int i, ret;
1711 dma_addr_t ccb_phys;
1712
Fabio Estevamb93edcd2015-07-29 21:03:49 -03001713 ret = clk_enable(sdma->clk_ipg);
1714 if (ret)
1715 return ret;
1716 ret = clk_enable(sdma->clk_ahb);
1717 if (ret)
1718 goto disable_clk_ipg;
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001719
1720 /* Be sure SDMA has not started yet */
Richard Zhaoc4b56852012-01-13 11:09:57 +08001721 writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001722
1723 sdma->channel_control = dma_alloc_coherent(NULL,
1724 MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) +
1725 sizeof(struct sdma_context_data),
1726 &ccb_phys, GFP_KERNEL);
1727
1728 if (!sdma->channel_control) {
1729 ret = -ENOMEM;
1730 goto err_dma_alloc;
1731 }
1732
1733 sdma->context = (void *)sdma->channel_control +
1734 MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
1735 sdma->context_phys = ccb_phys +
1736 MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
1737
1738 /* Zero-out the CCB structures array just allocated */
1739 memset(sdma->channel_control, 0,
1740 MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control));
1741
1742 /* disable all channels */
Sascha Hauer17bba722013-08-20 10:04:31 +02001743 for (i = 0; i < sdma->drvdata->num_events; i++)
Richard Zhaoc4b56852012-01-13 11:09:57 +08001744 writel_relaxed(0, sdma->regs + chnenbl_ofs(sdma, i));
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001745
1746 /* All channels have priority 0 */
1747 for (i = 0; i < MAX_DMA_CHANNELS; i++)
Richard Zhaoc4b56852012-01-13 11:09:57 +08001748 writel_relaxed(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001749
Robin Gong57b772b2018-06-20 00:57:00 +08001750 ret = sdma_request_channel0(sdma);
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001751 if (ret)
1752 goto err_dma_alloc;
1753
1754 sdma_config_ownership(&sdma->channel[0], false, true, false);
1755
1756 /* Set Command Channel (Channel Zero) */
Richard Zhaoc4b56852012-01-13 11:09:57 +08001757 writel_relaxed(0x4050, sdma->regs + SDMA_CHN0ADDR);
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001758
1759 /* Set bits of CONFIG register but with static context switching */
1760 /* FIXME: Check whether to set ACR bit depending on clock ratios */
Richard Zhaoc4b56852012-01-13 11:09:57 +08001761 writel_relaxed(0, sdma->regs + SDMA_H_CONFIG);
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001762
Richard Zhaoc4b56852012-01-13 11:09:57 +08001763 writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR);
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001764
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001765 /* Initializes channel's priorities */
1766 sdma_set_channel_priority(&sdma->channel[0], 7);
1767
Sascha Hauer7560e3f2012-03-07 09:30:06 +01001768 clk_disable(sdma->clk_ipg);
1769 clk_disable(sdma->clk_ahb);
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001770
1771 return 0;
1772
1773err_dma_alloc:
Sascha Hauer7560e3f2012-03-07 09:30:06 +01001774 clk_disable(sdma->clk_ahb);
Fabio Estevamb93edcd2015-07-29 21:03:49 -03001775disable_clk_ipg:
1776 clk_disable(sdma->clk_ipg);
Sascha Hauer1ec1e822010-09-30 13:56:34 +00001777 dev_err(sdma->dev, "initialisation failed with %d\n", ret);
1778 return ret;
1779}
1780
Shawn Guo9479e172013-05-30 22:23:32 +08001781static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param)
1782{
Nicolin Chen0b351862014-06-16 11:32:29 +08001783 struct sdma_channel *sdmac = to_sdma_chan(chan);
Shawn Guo9479e172013-05-30 22:23:32 +08001784 struct imx_dma_data *data = fn_param;
1785
1786 if (!imx_dma_is_general_purpose(chan))
1787 return false;
1788
Nicolin Chen0b351862014-06-16 11:32:29 +08001789 sdmac->data = *data;
1790 chan->private = &sdmac->data;
Shawn Guo9479e172013-05-30 22:23:32 +08001791
1792 return true;
1793}
1794
1795static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec,
1796 struct of_dma *ofdma)
1797{
1798 struct sdma_engine *sdma = ofdma->of_dma_data;
1799 dma_cap_mask_t mask = sdma->dma_device.cap_mask;
1800 struct imx_dma_data data;
1801
1802 if (dma_spec->args_count != 3)
1803 return NULL;
1804
1805 data.dma_request = dma_spec->args[0];
1806 data.peripheral_type = dma_spec->args[1];
1807 data.priority = dma_spec->args[2];
Shengjiu Wang8391ecf2015-07-10 17:08:16 +08001808 /*
1809 * init dma_request2 to zero, which is not used by the dts.
1810 * For P2P, dma_request2 is init from dma_request_channel(),
1811 * chan->private will point to the imx_dma_data, and in
1812 * device_alloc_chan_resources(), imx_dma_data.dma_request2 will
1813 * be set to sdmac->event_id1.
1814 */
1815 data.dma_request2 = 0;
Shawn Guo9479e172013-05-30 22:23:32 +08001816
1817 return dma_request_channel(mask, sdma_filter_fn, &data);
1818}
1819
/*
 * Platform probe: gather driver data from DT or the platform id table,
 * map registers, prepare clocks, request the interrupt, initialise all
 * channel structures and the engine itself, load scripts/firmware, and
 * finally register the dmaengine device (plus the DT DMA controller
 * when probing from device tree).  Resources acquired with devm_* are
 * released automatically; the rest unwinds through the goto labels.
 */
static int sdma_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
			of_match_device(sdma_dt_ids, &pdev->dev);
	struct device_node *np = pdev->dev.of_node;
	struct device_node *spba_bus;
	const char *fw_name;
	int ret;
	int irq;
	struct resource *iores;
	struct resource spba_res;
	struct sdma_platform_data *pdata = dev_get_platdata(&pdev->dev);
	int i;
	struct sdma_engine *sdma;
	s32 *saddr_arr;
	const struct sdma_driver_data *drvdata = NULL;

	/* DT match takes precedence over the legacy platform id table */
	if (of_id)
		drvdata = of_id->data;
	else if (pdev->id_entry)
		drvdata = (void *)pdev->id_entry->driver_data;

	if (!drvdata) {
		dev_err(&pdev->dev, "unable to find driver data\n");
		return -EINVAL;
	}

	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	sdma = devm_kzalloc(&pdev->dev, sizeof(*sdma), GFP_KERNEL);
	if (!sdma)
		return -ENOMEM;

	spin_lock_init(&sdma->channel_0_lock);

	sdma->dev = &pdev->dev;
	sdma->drvdata = drvdata;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	sdma->regs = devm_ioremap_resource(&pdev->dev, iores);
	if (IS_ERR(sdma->regs))
		return PTR_ERR(sdma->regs);

	sdma->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(sdma->clk_ipg))
		return PTR_ERR(sdma->clk_ipg);

	sdma->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(sdma->clk_ahb))
		return PTR_ERR(sdma->clk_ahb);

	/* clocks are prepared once here and only enabled/disabled later */
	ret = clk_prepare(sdma->clk_ipg);
	if (ret)
		return ret;

	ret = clk_prepare(sdma->clk_ahb);
	if (ret)
		goto err_clk;

	ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0, "sdma",
			       sdma);
	if (ret)
		goto err_irq;

	sdma->irq = irq;

	sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
	if (!sdma->script_addrs) {
		ret = -ENOMEM;
		goto err_irq;
	}

	/* initially no scripts available */
	saddr_arr = (s32 *)sdma->script_addrs;
	for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
		saddr_arr[i] = -EINVAL;

	dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);

	INIT_LIST_HEAD(&sdma->dma_device.channels);
	/* Initialize channel parameters */
	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
		struct sdma_channel *sdmac = &sdma->channel[i];

		sdmac->sdma = sdma;

		sdmac->channel = i;
		sdmac->vc.desc_free = sdma_desc_free;
		/*
		 * Add the channel to the DMAC list. Do not add channel 0 though
		 * because we need it internally in the SDMA driver. This also means
		 * that channel 0 in dmaengine counting matches sdma channel 1.
		 */
		if (i)
			vchan_init(&sdmac->vc, &sdma->dma_device);
	}

	ret = sdma_init(sdma);
	if (ret)
		goto err_init;

	ret = sdma_event_remap(sdma);
	if (ret)
		goto err_init;

	/* built-in script addresses first, platform data may override */
	if (sdma->drvdata->script_addrs)
		sdma_add_scripts(sdma, sdma->drvdata->script_addrs);
	if (pdata && pdata->script_addrs)
		sdma_add_scripts(sdma, pdata->script_addrs);

	/* firmware load is asynchronous; failure only warns (ROM fallback) */
	if (pdata) {
		ret = sdma_get_firmware(sdma, pdata->fw_name);
		if (ret)
			dev_warn(&pdev->dev, "failed to get firmware from platform data\n");
	} else {
		/*
		 * Because that device tree does not encode ROM script address,
		 * the RAM script in firmware is mandatory for device tree
		 * probe, otherwise it fails.
		 */
		ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
					      &fw_name);
		if (ret)
			dev_warn(&pdev->dev, "failed to get firmware name\n");
		else {
			ret = sdma_get_firmware(sdma, fw_name);
			if (ret)
				dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
		}
	}

	sdma->dma_device.dev = &pdev->dev;

	sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources;
	sdma->dma_device.device_free_chan_resources = sdma_free_chan_resources;
	sdma->dma_device.device_tx_status = sdma_tx_status;
	sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
	sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
	sdma->dma_device.device_config = sdma_config;
	sdma->dma_device.device_terminate_all = sdma_disable_channel_with_delay;
	sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS;
	sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS;
	sdma->dma_device.directions = SDMA_DMA_DIRECTIONS;
	sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
	sdma->dma_device.device_issue_pending = sdma_issue_pending;
	sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
	dma_set_max_seg_size(sdma->dma_device.dev, 65535);

	platform_set_drvdata(pdev, sdma);

	ret = dma_async_device_register(&sdma->dma_device);
	if (ret) {
		dev_err(&pdev->dev, "unable to register\n");
		goto err_init;
	}

	if (np) {
		ret = of_dma_controller_register(np, sdma_xlate, sdma);
		if (ret) {
			dev_err(&pdev->dev, "failed to register controller\n");
			goto err_register;
		}

		/* record the SPBA bus window used for peripheral addressing */
		spba_bus = of_find_compatible_node(NULL, NULL, "fsl,spba-bus");
		ret = of_address_to_resource(spba_bus, 0, &spba_res);
		if (!ret) {
			sdma->spba_start_addr = spba_res.start;
			sdma->spba_end_addr = spba_res.end;
		}
		of_node_put(spba_bus);
	}

	return 0;

err_register:
	dma_async_device_unregister(&sdma->dma_device);
err_init:
	kfree(sdma->script_addrs);
err_irq:
	clk_unprepare(sdma->clk_ahb);
err_clk:
	clk_unprepare(sdma->clk_ipg);
	return ret;
}
2011
Maxin B. John1d1bbd32013-02-20 02:07:04 +02002012static int sdma_remove(struct platform_device *pdev)
Sascha Hauer1ec1e822010-09-30 13:56:34 +00002013{
Vignesh Raman23e11812014-08-05 18:39:41 +05302014 struct sdma_engine *sdma = platform_get_drvdata(pdev);
Vignesh Ramanc12fe492014-08-05 18:39:42 +05302015 int i;
Vignesh Raman23e11812014-08-05 18:39:41 +05302016
Vinod Koul5bb9dbb2016-07-03 00:00:55 +05302017 devm_free_irq(&pdev->dev, sdma->irq, sdma);
Vignesh Raman23e11812014-08-05 18:39:41 +05302018 dma_async_device_unregister(&sdma->dma_device);
2019 kfree(sdma->script_addrs);
Arvind Yadavfb9caf32017-05-24 12:09:53 +05302020 clk_unprepare(sdma->clk_ahb);
2021 clk_unprepare(sdma->clk_ipg);
Vignesh Ramanc12fe492014-08-05 18:39:42 +05302022 /* Kill the tasklet */
2023 for (i = 0; i < MAX_DMA_CHANNELS; i++) {
2024 struct sdma_channel *sdmac = &sdma->channel[i];
2025
Robin Gong57b772b2018-06-20 00:57:00 +08002026 tasklet_kill(&sdmac->vc.task);
2027 sdma_free_chan_resources(&sdmac->vc.chan);
Vignesh Ramanc12fe492014-08-05 18:39:42 +05302028 }
Vignesh Raman23e11812014-08-05 18:39:41 +05302029
2030 platform_set_drvdata(pdev, NULL);
Vignesh Raman23e11812014-08-05 18:39:41 +05302031 return 0;
Sascha Hauer1ec1e822010-09-30 13:56:34 +00002032}
2033
2034static struct platform_driver sdma_driver = {
2035 .driver = {
2036 .name = "imx-sdma",
Shawn Guo580975d2011-07-14 08:35:48 +08002037 .of_match_table = sdma_dt_ids,
Sascha Hauer1ec1e822010-09-30 13:56:34 +00002038 },
Shawn Guo62550cd2011-07-13 21:33:17 +08002039 .id_table = sdma_devtypes,
Maxin B. John1d1bbd32013-02-20 02:07:04 +02002040 .remove = sdma_remove,
Vignesh Raman23e11812014-08-05 18:39:41 +05302041 .probe = sdma_probe,
Sascha Hauer1ec1e822010-09-30 13:56:34 +00002042};
2043
module_platform_driver(sdma_driver);

MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX SDMA driver");
/*
 * Declare the external RAM-script images so packaging tools can ship
 * them alongside the module for the SoC configs that request them.
 */
#if IS_ENABLED(CONFIG_SOC_IMX6Q)
MODULE_FIRMWARE("imx/sdma/sdma-imx6q.bin");
#endif
#if IS_ENABLED(CONFIG_SOC_IMX7D)
MODULE_FIRMWARE("imx/sdma/sdma-imx7d.bin");
#endif
MODULE_LICENSE("GPL");