/*
 * Copyright (C) 2017 Spreadtrum Communications Inc.
 *
 * SPDX-License-Identifier: GPL-2.0
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dma/sprd-dma.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include "virt-dma.h"

#define SPRD_DMA_CHN_REG_OFFSET		0x1000
#define SPRD_DMA_CHN_REG_LENGTH		0x40
#define SPRD_DMA_MEMCPY_MIN_SIZE	64

/* DMA global registers definition */
#define SPRD_DMA_GLB_PAUSE		0x0
#define SPRD_DMA_GLB_FRAG_WAIT		0x4
#define SPRD_DMA_GLB_REQ_PEND0_EN	0x8
#define SPRD_DMA_GLB_REQ_PEND1_EN	0xc
#define SPRD_DMA_GLB_INT_RAW_STS	0x10
#define SPRD_DMA_GLB_INT_MSK_STS	0x14
#define SPRD_DMA_GLB_REQ_STS		0x18
#define SPRD_DMA_GLB_CHN_EN_STS		0x1c
#define SPRD_DMA_GLB_DEBUG_STS		0x20
#define SPRD_DMA_GLB_ARB_SEL_STS	0x24
#define SPRD_DMA_GLB_2STAGE_GRP1	0x28
#define SPRD_DMA_GLB_2STAGE_GRP2	0x2c
#define SPRD_DMA_GLB_REQ_UID(uid)	(0x4 * ((uid) - 1))
#define SPRD_DMA_GLB_REQ_UID_OFFSET	0x2000

/* DMA channel registers definition */
#define SPRD_DMA_CHN_PAUSE		0x0
#define SPRD_DMA_CHN_REQ		0x4
#define SPRD_DMA_CHN_CFG		0x8
#define SPRD_DMA_CHN_INTC		0xc
#define SPRD_DMA_CHN_SRC_ADDR		0x10
#define SPRD_DMA_CHN_DES_ADDR		0x14
#define SPRD_DMA_CHN_FRG_LEN		0x18
#define SPRD_DMA_CHN_BLK_LEN		0x1c
#define SPRD_DMA_CHN_TRSC_LEN		0x20
#define SPRD_DMA_CHN_TRSF_STEP		0x24
#define SPRD_DMA_CHN_WARP_PTR		0x28
#define SPRD_DMA_CHN_WARP_TO		0x2c
#define SPRD_DMA_CHN_LLIST_PTR		0x30
#define SPRD_DMA_CHN_FRAG_STEP		0x34
#define SPRD_DMA_CHN_SRC_BLK_STEP	0x38
#define SPRD_DMA_CHN_DES_BLK_STEP	0x3c

/* SPRD_DMA_GLB_2STAGE_GRP register definition */
#define SPRD_DMA_GLB_2STAGE_EN		BIT(24)
#define SPRD_DMA_GLB_CHN_INT_MASK	GENMASK(23, 20)
#define SPRD_DMA_GLB_LIST_DONE_TRG	BIT(19)
#define SPRD_DMA_GLB_TRANS_DONE_TRG	BIT(18)
#define SPRD_DMA_GLB_BLOCK_DONE_TRG	BIT(17)
#define SPRD_DMA_GLB_FRAG_DONE_TRG	BIT(16)
#define SPRD_DMA_GLB_TRG_OFFSET		16
#define SPRD_DMA_GLB_DEST_CHN_MASK	GENMASK(13, 8)
#define SPRD_DMA_GLB_DEST_CHN_OFFSET	8
#define SPRD_DMA_GLB_SRC_CHN_MASK	GENMASK(5, 0)

/* SPRD_DMA_CHN_INTC register definition */
#define SPRD_DMA_INT_MASK		GENMASK(4, 0)
#define SPRD_DMA_INT_CLR_OFFSET		24
#define SPRD_DMA_FRAG_INT_EN		BIT(0)
#define SPRD_DMA_BLK_INT_EN		BIT(1)
#define SPRD_DMA_TRANS_INT_EN		BIT(2)
#define SPRD_DMA_LIST_INT_EN		BIT(3)
#define SPRD_DMA_CFG_ERR_INT_EN		BIT(4)

/* SPRD_DMA_CHN_CFG register definition */
#define SPRD_DMA_CHN_EN			BIT(0)
#define SPRD_DMA_LINKLIST_EN		BIT(4)
#define SPRD_DMA_WAIT_BDONE_OFFSET	24
#define SPRD_DMA_DONOT_WAIT_BDONE	1

/* SPRD_DMA_CHN_REQ register definition */
#define SPRD_DMA_REQ_EN			BIT(0)

/* SPRD_DMA_CHN_PAUSE register definition */
#define SPRD_DMA_PAUSE_EN		BIT(0)
#define SPRD_DMA_PAUSE_STS		BIT(2)
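/* Poll count used while waiting for a channel to report the paused state */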
#define SPRD_DMA_PAUSE_CNT		0x2000

/* DMA_CHN_WARP_* register definition */
#define SPRD_DMA_HIGH_ADDR_MASK		GENMASK(31, 28)
#define SPRD_DMA_LOW_ADDR_MASK		GENMASK(31, 0)
#define SPRD_DMA_HIGH_ADDR_OFFSET	4

/* SPRD_DMA_CHN_INTC register definition */
#define SPRD_DMA_FRAG_INT_STS		BIT(16)
#define SPRD_DMA_BLK_INT_STS		BIT(17)
#define SPRD_DMA_TRSC_INT_STS		BIT(18)
#define SPRD_DMA_LIST_INT_STS		BIT(19)
#define SPRD_DMA_CFGERR_INT_STS		BIT(20)
#define SPRD_DMA_CHN_INT_STS					\
	(SPRD_DMA_FRAG_INT_STS | SPRD_DMA_BLK_INT_STS |		\
	 SPRD_DMA_TRSC_INT_STS | SPRD_DMA_LIST_INT_STS |	\
	 SPRD_DMA_CFGERR_INT_STS)

/* SPRD_DMA_CHN_FRG_LEN register definition */
#define SPRD_DMA_SRC_DATAWIDTH_OFFSET	30
#define SPRD_DMA_DES_DATAWIDTH_OFFSET	28
#define SPRD_DMA_SWT_MODE_OFFSET	26
#define SPRD_DMA_REQ_MODE_OFFSET	24
#define SPRD_DMA_REQ_MODE_MASK		GENMASK(1, 0)
#define SPRD_DMA_FIX_SEL_OFFSET		21
#define SPRD_DMA_FIX_EN_OFFSET		20
#define SPRD_DMA_LLIST_END		BIT(19)
#define SPRD_DMA_FRG_LEN_MASK		GENMASK(16, 0)

/* SPRD_DMA_CHN_BLK_LEN register definition */
#define SPRD_DMA_BLK_LEN_MASK		GENMASK(16, 0)

/* SPRD_DMA_CHN_TRSC_LEN register definition */
#define SPRD_DMA_TRSC_LEN_MASK		GENMASK(27, 0)

/* SPRD_DMA_CHN_TRSF_STEP register definition */
#define SPRD_DMA_DEST_TRSF_STEP_OFFSET	16
#define SPRD_DMA_SRC_TRSF_STEP_OFFSET	0
#define SPRD_DMA_TRSF_STEP_MASK		GENMASK(15, 0)

/* define DMA channel mode & trigger mode mask */
#define SPRD_DMA_CHN_MODE_MASK		GENMASK(7, 0)
#define SPRD_DMA_TRG_MODE_MASK		GENMASK(7, 0)

/* define the DMA transfer step type */
#define SPRD_DMA_NONE_STEP		0
#define SPRD_DMA_BYTE_STEP		1
#define SPRD_DMA_SHORT_STEP		2
#define SPRD_DMA_WORD_STEP		4
#define SPRD_DMA_DWORD_STEP		8

#define SPRD_DMA_SOFTWARE_UID		0

/* dma data width values */
enum sprd_dma_datawidth {
	SPRD_DMA_DATAWIDTH_1_BYTE,
	SPRD_DMA_DATAWIDTH_2_BYTES,
	SPRD_DMA_DATAWIDTH_4_BYTES,
	SPRD_DMA_DATAWIDTH_8_BYTES,
};

/* dma channel hardware configuration */
struct sprd_dma_chn_hw {
	u32 pause;
	u32 req;
	u32 cfg;
	u32 intc;
	u32 src_addr;
	u32 des_addr;
	u32 frg_len;
	u32 blk_len;
	u32 trsc_len;
	u32 trsf_step;
	u32 wrap_ptr;
	u32 wrap_to;
	u32 llist_ptr;
	u32 frg_step;
	u32 src_blk_step;
	u32 des_blk_step;
};

/* dma request description */
struct sprd_dma_desc {
	struct virt_dma_desc	vd;
	struct sprd_dma_chn_hw	chn_hw;
	enum dma_transfer_direction dir;
};

/* dma channel description */
struct sprd_dma_chn {
	struct virt_dma_chan	vc;
	void __iomem		*chn_base;
	struct sprd_dma_linklist	linklist;
	struct dma_slave_config	slave_cfg;
	u32			chn_num;
	u32			dev_id;
	enum sprd_dma_chn_mode	chn_mode;
	enum sprd_dma_trg_mode	trg_mode;
	struct sprd_dma_desc	*cur_desc;
};

/* SPRD dma device */
struct sprd_dma_dev {
	struct dma_device	dma_dev;
	void __iomem		*glb_base;
	struct clk		*clk;
	struct clk		*ashb_clk;
	int			irq;
	u32			total_chns;
	struct sprd_dma_chn	channels[0];
};

static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param);
static struct of_dma_filter_info sprd_dma_info = {
	.filter_fn = sprd_dma_filter_fn,
};

static inline struct sprd_dma_chn *to_sprd_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct sprd_dma_chn, vc.chan);
}

static inline struct sprd_dma_dev *to_sprd_dma_dev(struct dma_chan *c)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(c);

	return container_of(schan, struct sprd_dma_dev, channels[c->chan_id]);
}

static inline struct sprd_dma_desc *to_sprd_dma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct sprd_dma_desc, vd);
}

static void sprd_dma_glb_update(struct sprd_dma_dev *sdev, u32 reg,
				u32 mask, u32 val)
{
	u32 orig = readl(sdev->glb_base + reg);
	u32 tmp;

	tmp = (orig & ~mask) | val;
	writel(tmp, sdev->glb_base + reg);
}

static void sprd_dma_chn_update(struct sprd_dma_chn *schan, u32 reg,
				u32 mask, u32 val)
{
	u32 orig = readl(schan->chn_base + reg);
	u32 tmp;

	tmp = (orig & ~mask) | val;
	writel(tmp, schan->chn_base + reg);
}

static int sprd_dma_enable(struct sprd_dma_dev *sdev)
{
	int ret;

	ret = clk_prepare_enable(sdev->clk);
	if (ret)
		return ret;

	/*
	 * The ashb_clk is optional and only used by the AGCP DMA controller,
	 * so check whether it needs to be enabled.
	 */
	if (!IS_ERR(sdev->ashb_clk))
		ret = clk_prepare_enable(sdev->ashb_clk);

	return ret;
}

static void sprd_dma_disable(struct sprd_dma_dev *sdev)
{
	clk_disable_unprepare(sdev->clk);

	/* Also disable the optional ashb_clk used by the AGCP DMA controller. */
	if (!IS_ERR(sdev->ashb_clk))
		clk_disable_unprepare(sdev->ashb_clk);
}

static void sprd_dma_set_uid(struct sprd_dma_chn *schan)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
	u32 dev_id = schan->dev_id;

	if (dev_id != SPRD_DMA_SOFTWARE_UID) {
		u32 uid_offset = SPRD_DMA_GLB_REQ_UID_OFFSET +
				 SPRD_DMA_GLB_REQ_UID(dev_id);

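		/*
		 * Route this slave request (UID) to the channel: the value is
		 * the 1-based channel number, so 0 (as written by
		 * sprd_dma_unset_uid()) releases the mapping.
		 */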
		writel(schan->chn_num + 1, sdev->glb_base + uid_offset);
	}
}

static void sprd_dma_unset_uid(struct sprd_dma_chn *schan)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
	u32 dev_id = schan->dev_id;

	if (dev_id != SPRD_DMA_SOFTWARE_UID) {
		u32 uid_offset = SPRD_DMA_GLB_REQ_UID_OFFSET +
				 SPRD_DMA_GLB_REQ_UID(dev_id);

		writel(0, sdev->glb_base + uid_offset);
	}
}

static void sprd_dma_clear_int(struct sprd_dma_chn *schan)
{
	sprd_dma_chn_update(schan, SPRD_DMA_CHN_INTC,
			    SPRD_DMA_INT_MASK << SPRD_DMA_INT_CLR_OFFSET,
			    SPRD_DMA_INT_MASK << SPRD_DMA_INT_CLR_OFFSET);
}

static void sprd_dma_enable_chn(struct sprd_dma_chn *schan)
{
	sprd_dma_chn_update(schan, SPRD_DMA_CHN_CFG, SPRD_DMA_CHN_EN,
			    SPRD_DMA_CHN_EN);
}

static void sprd_dma_disable_chn(struct sprd_dma_chn *schan)
{
	sprd_dma_chn_update(schan, SPRD_DMA_CHN_CFG, SPRD_DMA_CHN_EN, 0);
}

static void sprd_dma_soft_request(struct sprd_dma_chn *schan)
{
	sprd_dma_chn_update(schan, SPRD_DMA_CHN_REQ, SPRD_DMA_REQ_EN,
			    SPRD_DMA_REQ_EN);
}

static void sprd_dma_pause_resume(struct sprd_dma_chn *schan, bool enable)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
	u32 pause, timeout = SPRD_DMA_PAUSE_CNT;

	if (enable) {
		sprd_dma_chn_update(schan, SPRD_DMA_CHN_PAUSE,
				    SPRD_DMA_PAUSE_EN, SPRD_DMA_PAUSE_EN);

		do {
			pause = readl(schan->chn_base + SPRD_DMA_CHN_PAUSE);
			if (pause & SPRD_DMA_PAUSE_STS)
				break;

			cpu_relax();
		} while (--timeout > 0);

		if (!timeout)
			dev_warn(sdev->dma_dev.dev,
				 "pause dma controller timeout\n");
	} else {
		sprd_dma_chn_update(schan, SPRD_DMA_CHN_PAUSE,
				    SPRD_DMA_PAUSE_EN, 0);
	}
}

static void sprd_dma_stop_and_disable(struct sprd_dma_chn *schan)
{
	u32 cfg = readl(schan->chn_base + SPRD_DMA_CHN_CFG);

	if (!(cfg & SPRD_DMA_CHN_EN))
		return;

	sprd_dma_pause_resume(schan, true);
	sprd_dma_disable_chn(schan);
}

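/*
 * Addresses are 36 bits wide: bits [35:32] are kept in bits [31:28] of the
 * WARP_PTR/WARP_TO registers, hence the shift by SPRD_DMA_HIGH_ADDR_OFFSET.
 */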
static unsigned long sprd_dma_get_src_addr(struct sprd_dma_chn *schan)
{
	unsigned long addr, addr_high;

	addr = readl(schan->chn_base + SPRD_DMA_CHN_SRC_ADDR);
	addr_high = readl(schan->chn_base + SPRD_DMA_CHN_WARP_PTR) &
		    SPRD_DMA_HIGH_ADDR_MASK;

	return addr | (addr_high << SPRD_DMA_HIGH_ADDR_OFFSET);
}

static unsigned long sprd_dma_get_dst_addr(struct sprd_dma_chn *schan)
{
	unsigned long addr, addr_high;

	addr = readl(schan->chn_base + SPRD_DMA_CHN_DES_ADDR);
	addr_high = readl(schan->chn_base + SPRD_DMA_CHN_WARP_TO) &
		    SPRD_DMA_HIGH_ADDR_MASK;

	return addr | (addr_high << SPRD_DMA_HIGH_ADDR_OFFSET);
}

static enum sprd_dma_int_type sprd_dma_get_int_type(struct sprd_dma_chn *schan)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
	u32 intc_sts = readl(schan->chn_base + SPRD_DMA_CHN_INTC) &
		       SPRD_DMA_CHN_INT_STS;

	switch (intc_sts) {
	case SPRD_DMA_CFGERR_INT_STS:
		return SPRD_DMA_CFGERR_INT;

	case SPRD_DMA_LIST_INT_STS:
		return SPRD_DMA_LIST_INT;

	case SPRD_DMA_TRSC_INT_STS:
		return SPRD_DMA_TRANS_INT;

	case SPRD_DMA_BLK_INT_STS:
		return SPRD_DMA_BLK_INT;

	case SPRD_DMA_FRAG_INT_STS:
		return SPRD_DMA_FRAG_INT;

	default:
		dev_warn(sdev->dma_dev.dev, "incorrect dma interrupt type\n");
		return SPRD_DMA_NO_INT;
	}
}

static enum sprd_dma_req_mode sprd_dma_get_req_type(struct sprd_dma_chn *schan)
{
	u32 frag_reg = readl(schan->chn_base + SPRD_DMA_CHN_FRG_LEN);

	return (frag_reg >> SPRD_DMA_REQ_MODE_OFFSET) & SPRD_DMA_REQ_MODE_MASK;
}

static int sprd_dma_set_2stage_config(struct sprd_dma_chn *schan)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
	u32 val, chn = schan->chn_num + 1;

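	/*
	 * A 2-stage group register holds the source channel in bits [5:0],
	 * the destination channel in bits [13:8] and, in bits [19:16], the
	 * source-channel "done" event (fragment/block/transfer/list) that
	 * triggers the second stage.
	 */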
	switch (schan->chn_mode) {
	case SPRD_DMA_SRC_CHN0:
		val = chn & SPRD_DMA_GLB_SRC_CHN_MASK;
		val |= BIT(schan->trg_mode - 1) << SPRD_DMA_GLB_TRG_OFFSET;
		val |= SPRD_DMA_GLB_2STAGE_EN;
		sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP1, val, val);
		break;

	case SPRD_DMA_SRC_CHN1:
		val = chn & SPRD_DMA_GLB_SRC_CHN_MASK;
		val |= BIT(schan->trg_mode - 1) << SPRD_DMA_GLB_TRG_OFFSET;
		val |= SPRD_DMA_GLB_2STAGE_EN;
		sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP2, val, val);
		break;

	case SPRD_DMA_DST_CHN0:
		val = (chn << SPRD_DMA_GLB_DEST_CHN_OFFSET) &
		      SPRD_DMA_GLB_DEST_CHN_MASK;
		val |= SPRD_DMA_GLB_2STAGE_EN;
		sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP1, val, val);
		break;

	case SPRD_DMA_DST_CHN1:
		val = (chn << SPRD_DMA_GLB_DEST_CHN_OFFSET) &
		      SPRD_DMA_GLB_DEST_CHN_MASK;
		val |= SPRD_DMA_GLB_2STAGE_EN;
		sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP2, val, val);
		break;

	default:
		dev_err(sdev->dma_dev.dev, "invalid channel mode setting %d\n",
			schan->chn_mode);
		return -EINVAL;
	}

	return 0;
}

static void sprd_dma_set_chn_config(struct sprd_dma_chn *schan,
				    struct sprd_dma_desc *sdesc)
{
	struct sprd_dma_chn_hw *cfg = &sdesc->chn_hw;

	writel(cfg->pause, schan->chn_base + SPRD_DMA_CHN_PAUSE);
	writel(cfg->cfg, schan->chn_base + SPRD_DMA_CHN_CFG);
	writel(cfg->intc, schan->chn_base + SPRD_DMA_CHN_INTC);
	writel(cfg->src_addr, schan->chn_base + SPRD_DMA_CHN_SRC_ADDR);
	writel(cfg->des_addr, schan->chn_base + SPRD_DMA_CHN_DES_ADDR);
	writel(cfg->frg_len, schan->chn_base + SPRD_DMA_CHN_FRG_LEN);
	writel(cfg->blk_len, schan->chn_base + SPRD_DMA_CHN_BLK_LEN);
	writel(cfg->trsc_len, schan->chn_base + SPRD_DMA_CHN_TRSC_LEN);
	writel(cfg->trsf_step, schan->chn_base + SPRD_DMA_CHN_TRSF_STEP);
	writel(cfg->wrap_ptr, schan->chn_base + SPRD_DMA_CHN_WARP_PTR);
	writel(cfg->wrap_to, schan->chn_base + SPRD_DMA_CHN_WARP_TO);
	writel(cfg->llist_ptr, schan->chn_base + SPRD_DMA_CHN_LLIST_PTR);
	writel(cfg->frg_step, schan->chn_base + SPRD_DMA_CHN_FRAG_STEP);
	writel(cfg->src_blk_step, schan->chn_base + SPRD_DMA_CHN_SRC_BLK_STEP);
	writel(cfg->des_blk_step, schan->chn_base + SPRD_DMA_CHN_DES_BLK_STEP);
	writel(cfg->req, schan->chn_base + SPRD_DMA_CHN_REQ);
}

static void sprd_dma_start(struct sprd_dma_chn *schan)
{
	struct virt_dma_desc *vd = vchan_next_desc(&schan->vc);

	if (!vd)
		return;

	list_del(&vd->node);
	schan->cur_desc = to_sprd_dma_desc(vd);

	/*
	 * Set the 2-stage configuration if this channel takes part in a
	 * 2-stage transfer.
	 */
	if (schan->chn_mode && sprd_dma_set_2stage_config(schan))
		return;

	/*
	 * Copy the DMA configuration from the DMA descriptor to this hardware
	 * channel.
	 */
	sprd_dma_set_chn_config(schan, schan->cur_desc);
	sprd_dma_set_uid(schan);
	sprd_dma_enable_chn(schan);

	if (schan->dev_id == SPRD_DMA_SOFTWARE_UID)
		sprd_dma_soft_request(schan);
}

static void sprd_dma_stop(struct sprd_dma_chn *schan)
{
	sprd_dma_stop_and_disable(schan);
	sprd_dma_unset_uid(schan);
	sprd_dma_clear_int(schan);
	schan->cur_desc = NULL;
}

static bool sprd_dma_check_trans_done(struct sprd_dma_desc *sdesc,
				      enum sprd_dma_int_type int_type,
				      enum sprd_dma_req_mode req_mode)
{
	if (int_type == SPRD_DMA_NO_INT)
		return false;

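	/*
	 * The interrupt-type values are ordered so that anything at or above
	 * the configured request mode means the requested transfer unit has
	 * completed.
	 */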
	return int_type >= req_mode + 1;
}

static irqreturn_t dma_irq_handle(int irq, void *dev_id)
{
	struct sprd_dma_dev *sdev = (struct sprd_dma_dev *)dev_id;
	u32 irq_status = readl(sdev->glb_base + SPRD_DMA_GLB_INT_MSK_STS);
	struct sprd_dma_chn *schan;
	struct sprd_dma_desc *sdesc;
	enum sprd_dma_req_mode req_type;
	enum sprd_dma_int_type int_type;
	bool trans_done = false, cyclic = false;
	u32 i;

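	/*
	 * Each set bit in the masked interrupt status identifies a pending
	 * channel; handle them lowest-first, clearing one bit per iteration.
	 */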
	while (irq_status) {
		i = __ffs(irq_status);
		irq_status &= (irq_status - 1);
		schan = &sdev->channels[i];

		spin_lock(&schan->vc.lock);
		int_type = sprd_dma_get_int_type(schan);
		req_type = sprd_dma_get_req_type(schan);
		sprd_dma_clear_int(schan);

		sdesc = schan->cur_desc;

		/* Link-list transfers run cyclically, so schedule the callback. */
		cyclic = !!schan->linklist.phy_addr;
		if (cyclic) {
			vchan_cyclic_callback(&sdesc->vd);
		} else {
			/* Check if the dma request descriptor is done. */
			trans_done = sprd_dma_check_trans_done(sdesc, int_type,
							       req_type);
			if (trans_done) {
				vchan_cookie_complete(&sdesc->vd);
				schan->cur_desc = NULL;
				sprd_dma_start(schan);
			}
		}
		spin_unlock(&schan->vc.lock);
	}

	return IRQ_HANDLED;
}

static int sprd_dma_alloc_chan_resources(struct dma_chan *chan)
{
	return pm_runtime_get_sync(chan->device->dev);
}

static void sprd_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->vc.lock, flags);
	sprd_dma_stop(schan);
	spin_unlock_irqrestore(&schan->vc.lock, flags);

	vchan_free_chan_resources(&schan->vc);
	pm_runtime_put(chan->device->dev);
}

static enum dma_status sprd_dma_tx_status(struct dma_chan *chan,
					  dma_cookie_t cookie,
					  struct dma_tx_state *txstate)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	u32 pos;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&schan->vc.lock, flags);
	vd = vchan_find_desc(&schan->vc, cookie);
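	/*
	 * A descriptor still sitting in the queue has not transferred
	 * anything yet, so report its full configured length; for the
	 * in-flight descriptor, report the current hardware position (the
	 * write pointer for DEV_TO_MEM, the read pointer otherwise).
	 */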
	if (vd) {
		struct sprd_dma_desc *sdesc = to_sprd_dma_desc(vd);
		struct sprd_dma_chn_hw *hw = &sdesc->chn_hw;

		if (hw->trsc_len > 0)
			pos = hw->trsc_len;
		else if (hw->blk_len > 0)
			pos = hw->blk_len;
		else if (hw->frg_len > 0)
			pos = hw->frg_len;
		else
			pos = 0;
	} else if (schan->cur_desc && schan->cur_desc->vd.tx.cookie == cookie) {
		struct sprd_dma_desc *sdesc = schan->cur_desc;

		if (sdesc->dir == DMA_DEV_TO_MEM)
			pos = sprd_dma_get_dst_addr(schan);
		else
			pos = sprd_dma_get_src_addr(schan);
	} else {
		pos = 0;
	}
	spin_unlock_irqrestore(&schan->vc.lock, flags);

	dma_set_residue(txstate, pos);
	return ret;
}

static void sprd_dma_issue_pending(struct dma_chan *chan)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->vc.lock, flags);
	if (vchan_issue_pending(&schan->vc) && !schan->cur_desc)
		sprd_dma_start(schan);
	spin_unlock_irqrestore(&schan->vc.lock, flags);
}

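/*
 * Map a bus width in bytes (1/2/4/8) to the register encoding 0/1/2/3,
 * matching enum sprd_dma_datawidth.
 */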
static int sprd_dma_get_datawidth(enum dma_slave_buswidth buswidth)
{
	switch (buswidth) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		return ffs(buswidth) - 1;

	default:
		return -EINVAL;
	}
}

static int sprd_dma_get_step(enum dma_slave_buswidth buswidth)
{
	switch (buswidth) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		return buswidth;

	default:
		return -EINVAL;
	}
}

static int sprd_dma_fill_desc(struct dma_chan *chan,
			      struct sprd_dma_chn_hw *hw,
			      unsigned int sglen, int sg_index,
			      dma_addr_t src, dma_addr_t dst, u32 len,
			      enum dma_transfer_direction dir,
			      unsigned long flags,
			      struct dma_slave_config *slave_cfg)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(chan);
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	enum sprd_dma_chn_mode chn_mode = schan->chn_mode;
	u32 req_mode = (flags >> SPRD_DMA_REQ_SHIFT) & SPRD_DMA_REQ_MODE_MASK;
	u32 int_mode = flags & SPRD_DMA_INT_MASK;
	int src_datawidth, dst_datawidth, src_step, dst_step;
	u32 temp, fix_mode = 0, fix_en = 0;

	if (dir == DMA_MEM_TO_DEV) {
		src_step = sprd_dma_get_step(slave_cfg->src_addr_width);
		if (src_step < 0) {
			dev_err(sdev->dma_dev.dev, "invalid source step\n");
			return src_step;
		}

		/*
		 * For a 2-stage transfer, the destination channel step cannot
		 * be 0, since the destination device is the AON IRAM.
		 */
		if (chn_mode == SPRD_DMA_DST_CHN0 ||
		    chn_mode == SPRD_DMA_DST_CHN1)
			dst_step = src_step;
		else
			dst_step = SPRD_DMA_NONE_STEP;
	} else {
		dst_step = sprd_dma_get_step(slave_cfg->dst_addr_width);
		if (dst_step < 0) {
			dev_err(sdev->dma_dev.dev, "invalid destination step\n");
			return dst_step;
		}
		src_step = SPRD_DMA_NONE_STEP;
	}

	src_datawidth = sprd_dma_get_datawidth(slave_cfg->src_addr_width);
	if (src_datawidth < 0) {
		dev_err(sdev->dma_dev.dev, "invalid source datawidth\n");
		return src_datawidth;
	}

	dst_datawidth = sprd_dma_get_datawidth(slave_cfg->dst_addr_width);
	if (dst_datawidth < 0) {
		dev_err(sdev->dma_dev.dev, "invalid destination datawidth\n");
		return dst_datawidth;
	}

	if (slave_cfg->slave_id)
		schan->dev_id = slave_cfg->slave_id;

	hw->cfg = SPRD_DMA_DONOT_WAIT_BDONE << SPRD_DMA_WAIT_BDONE_OFFSET;

	/*
	 * wrap_ptr and wrap_to hold the high 4 bits of the source and
	 * destination addresses.
	 */
	hw->wrap_ptr = (src >> SPRD_DMA_HIGH_ADDR_OFFSET) & SPRD_DMA_HIGH_ADDR_MASK;
	hw->wrap_to = (dst >> SPRD_DMA_HIGH_ADDR_OFFSET) & SPRD_DMA_HIGH_ADDR_MASK;
	hw->src_addr = src & SPRD_DMA_LOW_ADDR_MASK;
	hw->des_addr = dst & SPRD_DMA_LOW_ADDR_MASK;

	/*
	 * If the source and destination steps are both 0, or both non-zero,
	 * the fix mode cannot be enabled; if exactly one of them is 0, it can.
	 */
	if ((src_step != 0 && dst_step != 0) || (src_step | dst_step) == 0) {
		fix_en = 0;
	} else {
		fix_en = 1;
		if (src_step)
			fix_mode = 1;
		else
			fix_mode = 0;
	}

	hw->intc = int_mode | SPRD_DMA_CFG_ERR_INT_EN;

	temp = src_datawidth << SPRD_DMA_SRC_DATAWIDTH_OFFSET;
	temp |= dst_datawidth << SPRD_DMA_DES_DATAWIDTH_OFFSET;
	temp |= req_mode << SPRD_DMA_REQ_MODE_OFFSET;
	temp |= fix_mode << SPRD_DMA_FIX_SEL_OFFSET;
	temp |= fix_en << SPRD_DMA_FIX_EN_OFFSET;
	temp |= slave_cfg->src_maxburst & SPRD_DMA_FRG_LEN_MASK;
	hw->frg_len = temp;

	hw->blk_len = len & SPRD_DMA_BLK_LEN_MASK;
	hw->trsc_len = len & SPRD_DMA_TRSC_LEN_MASK;

	temp = (dst_step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_DEST_TRSF_STEP_OFFSET;
	temp |= (src_step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_SRC_TRSF_STEP_OFFSET;
	hw->trsf_step = temp;

	/* link-list configuration */
	if (schan->linklist.phy_addr) {
		hw->cfg |= SPRD_DMA_LINKLIST_EN;

		/*
		 * Index of the next link-list node: the modulo wraps the last
		 * node back to the first, so the configurations form a ring.
		 */
		temp = sglen ? (sg_index + 1) % sglen : 0;

		/* Next link-list configuration's physical address offset */
		temp = temp * sizeof(*hw) + SPRD_DMA_CHN_SRC_ADDR;
		/*
		 * Point the link-list pointer at the next configuration's
		 * physical address.
		 */
		hw->llist_ptr = schan->linklist.phy_addr + temp;
	} else {
		hw->llist_ptr = 0;
	}

	hw->frg_step = 0;
	hw->src_blk_step = 0;
	hw->des_blk_step = 0;
	return 0;
}

static int sprd_dma_fill_linklist_desc(struct dma_chan *chan,
				       unsigned int sglen, int sg_index,
				       dma_addr_t src, dma_addr_t dst, u32 len,
				       enum dma_transfer_direction dir,
				       unsigned long flags,
				       struct dma_slave_config *slave_cfg)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	struct sprd_dma_chn_hw *hw;

	if (!schan->linklist.virt_addr)
		return -EINVAL;

	hw = (struct sprd_dma_chn_hw *)(schan->linklist.virt_addr +
					sg_index * sizeof(*hw));

	return sprd_dma_fill_desc(chan, hw, sglen, sg_index, src, dst, len,
				  dir, flags, slave_cfg);
}

static struct dma_async_tx_descriptor *
sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
			 size_t len, unsigned long flags)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	struct sprd_dma_desc *sdesc;
	struct sprd_dma_chn_hw *hw;
	enum sprd_dma_datawidth datawidth;
	u32 step, temp;

	sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
	if (!sdesc)
		return NULL;

	hw = &sdesc->chn_hw;

	hw->cfg = SPRD_DMA_DONOT_WAIT_BDONE << SPRD_DMA_WAIT_BDONE_OFFSET;
	hw->intc = SPRD_DMA_TRANS_INT | SPRD_DMA_CFG_ERR_INT_EN;
	hw->src_addr = src & SPRD_DMA_LOW_ADDR_MASK;
	hw->des_addr = dest & SPRD_DMA_LOW_ADDR_MASK;
	hw->wrap_ptr = (src >> SPRD_DMA_HIGH_ADDR_OFFSET) &
		       SPRD_DMA_HIGH_ADDR_MASK;
	hw->wrap_to = (dest >> SPRD_DMA_HIGH_ADDR_OFFSET) &
		      SPRD_DMA_HIGH_ADDR_MASK;

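	/* Choose the widest data width and step that the length is aligned to. */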
	if (IS_ALIGNED(len, 8)) {
		datawidth = SPRD_DMA_DATAWIDTH_8_BYTES;
		step = SPRD_DMA_DWORD_STEP;
	} else if (IS_ALIGNED(len, 4)) {
		datawidth = SPRD_DMA_DATAWIDTH_4_BYTES;
		step = SPRD_DMA_WORD_STEP;
	} else if (IS_ALIGNED(len, 2)) {
		datawidth = SPRD_DMA_DATAWIDTH_2_BYTES;
		step = SPRD_DMA_SHORT_STEP;
	} else {
		datawidth = SPRD_DMA_DATAWIDTH_1_BYTE;
		step = SPRD_DMA_BYTE_STEP;
	}

	temp = datawidth << SPRD_DMA_SRC_DATAWIDTH_OFFSET;
	temp |= datawidth << SPRD_DMA_DES_DATAWIDTH_OFFSET;
	temp |= SPRD_DMA_TRANS_REQ << SPRD_DMA_REQ_MODE_OFFSET;
	temp |= len & SPRD_DMA_FRG_LEN_MASK;
	hw->frg_len = temp;

	hw->blk_len = len & SPRD_DMA_BLK_LEN_MASK;
	hw->trsc_len = len & SPRD_DMA_TRSC_LEN_MASK;

	temp = (step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_DEST_TRSF_STEP_OFFSET;
	temp |= (step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_SRC_TRSF_STEP_OFFSET;
	hw->trsf_step = temp;

	return vchan_tx_prep(&schan->vc, &sdesc->vd, flags);
}

static struct dma_async_tx_descriptor *
sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		       unsigned int sglen, enum dma_transfer_direction dir,
		       unsigned long flags, void *context)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	struct dma_slave_config *slave_cfg = &schan->slave_cfg;
	dma_addr_t src = 0, dst = 0;
	struct sprd_dma_desc *sdesc;
	struct scatterlist *sg;
	u32 len = 0;
	int ret, i;

	if (!is_slave_direction(dir))
		return NULL;

	if (context) {
		struct sprd_dma_linklist *ll_cfg =
			(struct sprd_dma_linklist *)context;

		schan->linklist.phy_addr = ll_cfg->phy_addr;
		schan->linklist.virt_addr = ll_cfg->virt_addr;
	} else {
		schan->linklist.phy_addr = 0;
		schan->linklist.virt_addr = 0;
	}

	sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
	if (!sdesc)
		return NULL;

	sdesc->dir = dir;

	for_each_sg(sgl, sg, sglen, i) {
		len = sg_dma_len(sg);

		if (dir == DMA_MEM_TO_DEV) {
			src = sg_dma_address(sg);
			dst = slave_cfg->dst_addr;
		} else {
			src = slave_cfg->src_addr;
			dst = sg_dma_address(sg);
		}

		/*
		 * Link-list mode needs at least 2 link-list configurations;
		 * with a single sg there is nothing to fill in.
		 */
		if (sglen < 2)
			break;

		ret = sprd_dma_fill_linklist_desc(chan, sglen, i, src, dst, len,
						  dir, flags, slave_cfg);
		if (ret) {
			kfree(sdesc);
			return NULL;
		}
	}

	/* Set channel mode and trigger mode for 2-stage transfer */
	schan->chn_mode =
		(flags >> SPRD_DMA_CHN_MODE_SHIFT) & SPRD_DMA_CHN_MODE_MASK;
	schan->trg_mode =
		(flags >> SPRD_DMA_TRG_MODE_SHIFT) & SPRD_DMA_TRG_MODE_MASK;

	ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, src, dst, len,
				 dir, flags, slave_cfg);
	if (ret) {
		kfree(sdesc);
		return NULL;
	}

	return vchan_tx_prep(&schan->vc, &sdesc->vd, flags);
}

static int sprd_dma_slave_config(struct dma_chan *chan,
				 struct dma_slave_config *config)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	struct dma_slave_config *slave_cfg = &schan->slave_cfg;

	memcpy(slave_cfg, config, sizeof(*config));
	return 0;
}

static int sprd_dma_pause(struct dma_chan *chan)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->vc.lock, flags);
	sprd_dma_pause_resume(schan, true);
	spin_unlock_irqrestore(&schan->vc.lock, flags);

	return 0;
}

static int sprd_dma_resume(struct dma_chan *chan)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->vc.lock, flags);
	sprd_dma_pause_resume(schan, false);
	spin_unlock_irqrestore(&schan->vc.lock, flags);

	return 0;
}

static int sprd_dma_terminate_all(struct dma_chan *chan)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&schan->vc.lock, flags);
	sprd_dma_stop(schan);

	vchan_get_all_descriptors(&schan->vc, &head);
	spin_unlock_irqrestore(&schan->vc.lock, flags);

	vchan_dma_desc_free_list(&schan->vc, &head);
	return 0;
}

static void sprd_dma_free_desc(struct virt_dma_desc *vd)
{
	struct sprd_dma_desc *sdesc = to_sprd_dma_desc(vd);

	kfree(sdesc);
}

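/*
 * Used via of_dma_simple_xlate(), which passes a pointer to the first DT
 * dma-cell (the slave request ID) as @param.
 */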
static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	u32 slave_id = *(u32 *)param;

	schan->dev_id = slave_id;
	return true;
}

static int sprd_dma_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct sprd_dma_dev *sdev;
	struct sprd_dma_chn *dma_chn;
	struct resource *res;
	u32 chn_count;
	int ret, i;

	ret = device_property_read_u32(&pdev->dev, "#dma-channels", &chn_count);
	if (ret) {
		dev_err(&pdev->dev, "get dma channels count failed\n");
		return ret;
	}

	sdev = devm_kzalloc(&pdev->dev,
			    struct_size(sdev, channels, chn_count),
			    GFP_KERNEL);
	if (!sdev)
		return -ENOMEM;

	sdev->clk = devm_clk_get(&pdev->dev, "enable");
	if (IS_ERR(sdev->clk)) {
		dev_err(&pdev->dev, "get enable clock failed\n");
		return PTR_ERR(sdev->clk);
	}

	/* ashb clock is optional for AGCP DMA */
	sdev->ashb_clk = devm_clk_get(&pdev->dev, "ashb_eb");
	if (IS_ERR(sdev->ashb_clk))
		dev_warn(&pdev->dev, "no optional ashb eb clock\n");

	/*
	 * There are three DMA controllers: AP DMA, AON DMA and AGCP DMA. The
	 * AGCP DMA controller may run without an IRQ, which saves power by
	 * not waking the system on DMA interrupts, so the interrupts property
	 * is optional.
	 */
	sdev->irq = platform_get_irq(pdev, 0);
	if (sdev->irq > 0) {
		ret = devm_request_irq(&pdev->dev, sdev->irq, dma_irq_handle,
				       0, "sprd_dma", (void *)sdev);
		if (ret < 0) {
			dev_err(&pdev->dev, "request dma irq failed\n");
			return ret;
		}
	} else {
		dev_warn(&pdev->dev, "no interrupts for the dma controller\n");
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	sdev->glb_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(sdev->glb_base))
		return PTR_ERR(sdev->glb_base);

	dma_cap_set(DMA_MEMCPY, sdev->dma_dev.cap_mask);
	sdev->total_chns = chn_count;
	sdev->dma_dev.chancnt = chn_count;
	INIT_LIST_HEAD(&sdev->dma_dev.channels);
	INIT_LIST_HEAD(&sdev->dma_dev.global_node);
	sdev->dma_dev.dev = &pdev->dev;
	sdev->dma_dev.device_alloc_chan_resources = sprd_dma_alloc_chan_resources;
	sdev->dma_dev.device_free_chan_resources = sprd_dma_free_chan_resources;
	sdev->dma_dev.device_tx_status = sprd_dma_tx_status;
	sdev->dma_dev.device_issue_pending = sprd_dma_issue_pending;
	sdev->dma_dev.device_prep_dma_memcpy = sprd_dma_prep_dma_memcpy;
	sdev->dma_dev.device_prep_slave_sg = sprd_dma_prep_slave_sg;
	sdev->dma_dev.device_config = sprd_dma_slave_config;
	sdev->dma_dev.device_pause = sprd_dma_pause;
	sdev->dma_dev.device_resume = sprd_dma_resume;
	sdev->dma_dev.device_terminate_all = sprd_dma_terminate_all;

	for (i = 0; i < chn_count; i++) {
		dma_chn = &sdev->channels[i];
		dma_chn->chn_num = i;
		dma_chn->cur_desc = NULL;
		/* get each channel's registers base address. */
		dma_chn->chn_base = sdev->glb_base + SPRD_DMA_CHN_REG_OFFSET +
				    SPRD_DMA_CHN_REG_LENGTH * i;

		dma_chn->vc.desc_free = sprd_dma_free_desc;
		vchan_init(&dma_chn->vc, &sdev->dma_dev);
	}

	platform_set_drvdata(pdev, sdev);
	ret = sprd_dma_enable(sdev);
	if (ret)
		return ret;

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0)
		goto err_rpm;

	ret = dma_async_device_register(&sdev->dma_dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "register dma device failed:%d\n", ret);
		goto err_register;
	}

	sprd_dma_info.dma_cap = sdev->dma_dev.cap_mask;
	ret = of_dma_controller_register(np, of_dma_simple_xlate,
					 &sprd_dma_info);
	if (ret)
		goto err_of_register;

	pm_runtime_put(&pdev->dev);
	return 0;

err_of_register:
	dma_async_device_unregister(&sdev->dma_dev);
err_register:
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
err_rpm:
	sprd_dma_disable(sdev);
	return ret;
}

static int sprd_dma_remove(struct platform_device *pdev)
{
	struct sprd_dma_dev *sdev = platform_get_drvdata(pdev);
	struct sprd_dma_chn *c, *cn;
	int ret;

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0)
		return ret;

	/* explicitly free the irq */
	if (sdev->irq > 0)
		devm_free_irq(&pdev->dev, sdev->irq, sdev);

	list_for_each_entry_safe(c, cn, &sdev->dma_dev.channels,
				 vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
	}

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&sdev->dma_dev);
	sprd_dma_disable(sdev);

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

static const struct of_device_id sprd_dma_match[] = {
	{ .compatible = "sprd,sc9860-dma", },
	{},
};

static int __maybe_unused sprd_dma_runtime_suspend(struct device *dev)
{
	struct sprd_dma_dev *sdev = dev_get_drvdata(dev);

	sprd_dma_disable(sdev);
	return 0;
}

static int __maybe_unused sprd_dma_runtime_resume(struct device *dev)
{
	struct sprd_dma_dev *sdev = dev_get_drvdata(dev);
	int ret;

	ret = sprd_dma_enable(sdev);
	if (ret)
		dev_err(sdev->dma_dev.dev, "enable dma failed\n");

	return ret;
}

static const struct dev_pm_ops sprd_dma_pm_ops = {
	SET_RUNTIME_PM_OPS(sprd_dma_runtime_suspend,
			   sprd_dma_runtime_resume,
			   NULL)
};

static struct platform_driver sprd_dma_driver = {
	.probe = sprd_dma_probe,
	.remove = sprd_dma_remove,
	.driver = {
		.name = "sprd-dma",
		.of_match_table = sprd_dma_match,
		.pm = &sprd_dma_pm_ops,
	},
};
module_platform_driver(sprd_dma_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DMA driver for Spreadtrum");
MODULE_AUTHOR("Baolin Wang <baolin.wang@spreadtrum.com>");
MODULE_AUTHOR("Eric Long <eric.long@spreadtrum.com>");
MODULE_ALIAS("platform:sprd-dma");