/*
 * Copyright (C) 2017 Spreadtrum Communications Inc.
 *
 * SPDX-License-Identifier: GPL-2.0
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include "virt-dma.h"

#define SPRD_DMA_CHN_REG_OFFSET		0x1000
#define SPRD_DMA_CHN_REG_LENGTH		0x40
#define SPRD_DMA_MEMCPY_MIN_SIZE	64

/* DMA global registers definition */
#define SPRD_DMA_GLB_PAUSE		0x0
#define SPRD_DMA_GLB_FRAG_WAIT		0x4
#define SPRD_DMA_GLB_REQ_PEND0_EN	0x8
#define SPRD_DMA_GLB_REQ_PEND1_EN	0xc
#define SPRD_DMA_GLB_INT_RAW_STS	0x10
#define SPRD_DMA_GLB_INT_MSK_STS	0x14
#define SPRD_DMA_GLB_REQ_STS		0x18
#define SPRD_DMA_GLB_CHN_EN_STS		0x1c
#define SPRD_DMA_GLB_DEBUG_STS		0x20
#define SPRD_DMA_GLB_ARB_SEL_STS	0x24
#define SPRD_DMA_GLB_REQ_UID(uid)	(0x4 * ((uid) - 1))
#define SPRD_DMA_GLB_REQ_UID_OFFSET	0x2000

/* DMA channel registers definition */
#define SPRD_DMA_CHN_PAUSE		0x0
#define SPRD_DMA_CHN_REQ		0x4
#define SPRD_DMA_CHN_CFG		0x8
#define SPRD_DMA_CHN_INTC		0xc
#define SPRD_DMA_CHN_SRC_ADDR		0x10
#define SPRD_DMA_CHN_DES_ADDR		0x14
#define SPRD_DMA_CHN_FRG_LEN		0x18
#define SPRD_DMA_CHN_BLK_LEN		0x1c
#define SPRD_DMA_CHN_TRSC_LEN		0x20
#define SPRD_DMA_CHN_TRSF_STEP		0x24
#define SPRD_DMA_CHN_WARP_PTR		0x28
#define SPRD_DMA_CHN_WARP_TO		0x2c
#define SPRD_DMA_CHN_LLIST_PTR		0x30
#define SPRD_DMA_CHN_FRAG_STEP		0x34
#define SPRD_DMA_CHN_SRC_BLK_STEP	0x38
#define SPRD_DMA_CHN_DES_BLK_STEP	0x3c

/* SPRD_DMA_CHN_INTC register definition */
#define SPRD_DMA_INT_MASK		GENMASK(4, 0)
#define SPRD_DMA_INT_CLR_OFFSET		24
#define SPRD_DMA_FRAG_INT_EN		BIT(0)
#define SPRD_DMA_BLK_INT_EN		BIT(1)
#define SPRD_DMA_TRANS_INT_EN		BIT(2)
#define SPRD_DMA_LIST_INT_EN		BIT(3)
#define SPRD_DMA_CFG_ERR_INT_EN		BIT(4)

/* SPRD_DMA_CHN_CFG register definition */
#define SPRD_DMA_CHN_EN			BIT(0)
#define SPRD_DMA_WAIT_BDONE_OFFSET	24
#define SPRD_DMA_DONOT_WAIT_BDONE	1

/* SPRD_DMA_CHN_REQ register definition */
#define SPRD_DMA_REQ_EN			BIT(0)

/* SPRD_DMA_CHN_PAUSE register definition */
#define SPRD_DMA_PAUSE_EN		BIT(0)
#define SPRD_DMA_PAUSE_STS		BIT(2)
#define SPRD_DMA_PAUSE_CNT		0x2000

/* DMA_CHN_WARP_* register definition */
#define SPRD_DMA_HIGH_ADDR_MASK		GENMASK(31, 28)
#define SPRD_DMA_LOW_ADDR_MASK		GENMASK(31, 0)
#define SPRD_DMA_HIGH_ADDR_OFFSET	4

/* SPRD_DMA_CHN_INTC register definition */
#define SPRD_DMA_FRAG_INT_STS		BIT(16)
#define SPRD_DMA_BLK_INT_STS		BIT(17)
#define SPRD_DMA_TRSC_INT_STS		BIT(18)
#define SPRD_DMA_LIST_INT_STS		BIT(19)
#define SPRD_DMA_CFGERR_INT_STS		BIT(20)
#define SPRD_DMA_CHN_INT_STS					\
	(SPRD_DMA_FRAG_INT_STS | SPRD_DMA_BLK_INT_STS |		\
	 SPRD_DMA_TRSC_INT_STS | SPRD_DMA_LIST_INT_STS |	\
	 SPRD_DMA_CFGERR_INT_STS)

/* SPRD_DMA_CHN_FRG_LEN register definition */
#define SPRD_DMA_SRC_DATAWIDTH_OFFSET	30
#define SPRD_DMA_DES_DATAWIDTH_OFFSET	28
#define SPRD_DMA_SWT_MODE_OFFSET	26
#define SPRD_DMA_REQ_MODE_OFFSET	24
#define SPRD_DMA_REQ_MODE_MASK		GENMASK(1, 0)
#define SPRD_DMA_FIX_SEL_OFFSET		21
#define SPRD_DMA_FIX_EN_OFFSET		20
#define SPRD_DMA_LLIST_END_OFFSET	19
#define SPRD_DMA_FRG_LEN_MASK		GENMASK(16, 0)

/* SPRD_DMA_CHN_BLK_LEN register definition */
#define SPRD_DMA_BLK_LEN_MASK		GENMASK(16, 0)

/* SPRD_DMA_CHN_TRSC_LEN register definition */
#define SPRD_DMA_TRSC_LEN_MASK		GENMASK(27, 0)

/* SPRD_DMA_CHN_TRSF_STEP register definition */
#define SPRD_DMA_DEST_TRSF_STEP_OFFSET	16
#define SPRD_DMA_SRC_TRSF_STEP_OFFSET	0
#define SPRD_DMA_TRSF_STEP_MASK		GENMASK(15, 0)

/* define the DMA transfer step type */
#define SPRD_DMA_NONE_STEP		0
#define SPRD_DMA_BYTE_STEP		1
#define SPRD_DMA_SHORT_STEP		2
#define SPRD_DMA_WORD_STEP		4
#define SPRD_DMA_DWORD_STEP		8

#define SPRD_DMA_SOFTWARE_UID		0

/*
 * enum sprd_dma_req_mode: define the DMA request mode
 * @SPRD_DMA_FRAG_REQ: fragment request mode
 * @SPRD_DMA_BLK_REQ: block request mode
 * @SPRD_DMA_TRANS_REQ: transaction request mode
 * @SPRD_DMA_LIST_REQ: link-list request mode
 *
 * There are 4 request modes: fragment mode, block mode, transaction mode and
 * link-list mode. One transaction can contain several blocks, and one block
 * can contain several fragments. In link-list mode, several DMA configurations
 * can be stored in one reserved memory region, and the controller fetches each
 * configuration automatically to start the next transfer (a sizing
 * illustration follows the enum below).
 */
enum sprd_dma_req_mode {
	SPRD_DMA_FRAG_REQ,
	SPRD_DMA_BLK_REQ,
	SPRD_DMA_TRANS_REQ,
	SPRD_DMA_LIST_REQ,
};

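/*
 * Rough illustration of how the memcpy path (sprd_dma_config() below) maps a
 * copy onto these request modes; the numbers come from the driver constants
 * and are only a reading aid, not a hardware specification:
 *
 *	len = 0x1000 (<= SPRD_DMA_BLK_LEN_MASK):
 *		fragment length = SPRD_DMA_MEMCPY_MIN_SIZE (64)
 *		block length    = 0x1000
 *		request mode    = SPRD_DMA_BLK_REQ, block done interrupt
 *
 *	len = 0x40000 (> SPRD_DMA_BLK_LEN_MASK):
 *		fragment length    = 64
 *		block length       = 64
 *		transaction length = 0x40000
 *		request mode       = SPRD_DMA_TRANS_REQ, transaction done
 *		interrupt
 */
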
/*
 * enum sprd_dma_int_type: define the DMA interrupt type
 * @SPRD_DMA_NO_INT: do not generate DMA interrupts.
 * @SPRD_DMA_FRAG_INT: fragment done interrupt when one fragment request
 * is done.
 * @SPRD_DMA_BLK_INT: block done interrupt when one block request is done.
 * @SPRD_DMA_BLK_FRAG_INT: block and fragment interrupt when one fragment
 * or one block request is done.
 * @SPRD_DMA_TRANS_INT: transaction done interrupt when one transaction
 * request is done.
 * @SPRD_DMA_TRANS_FRAG_INT: transaction and fragment interrupt when one
 * transaction request or fragment request is done.
 * @SPRD_DMA_TRANS_BLK_INT: transaction and block interrupt when one
 * transaction request or block request is done.
 * @SPRD_DMA_LIST_INT: link-list done interrupt when one link-list request
 * is done.
 * @SPRD_DMA_CFGERR_INT: configure error interrupt when the configuration is
 * incorrect.
 */
enum sprd_dma_int_type {
	SPRD_DMA_NO_INT,
	SPRD_DMA_FRAG_INT,
	SPRD_DMA_BLK_INT,
	SPRD_DMA_BLK_FRAG_INT,
	SPRD_DMA_TRANS_INT,
	SPRD_DMA_TRANS_FRAG_INT,
	SPRD_DMA_TRANS_BLK_INT,
	SPRD_DMA_LIST_INT,
	SPRD_DMA_CFGERR_INT,
};

/* dma channel hardware configuration */
struct sprd_dma_chn_hw {
	u32 pause;
	u32 req;
	u32 cfg;
	u32 intc;
	u32 src_addr;
	u32 des_addr;
	u32 frg_len;
	u32 blk_len;
	u32 trsc_len;
	u32 trsf_step;
	u32 wrap_ptr;
	u32 wrap_to;
	u32 llist_ptr;
	u32 frg_step;
	u32 src_blk_step;
	u32 des_blk_step;
};

/* dma request description */
struct sprd_dma_desc {
	struct virt_dma_desc vd;
	struct sprd_dma_chn_hw chn_hw;
};

/* dma channel description */
struct sprd_dma_chn {
	struct virt_dma_chan vc;
	void __iomem *chn_base;
	u32 chn_num;
	u32 dev_id;
	struct sprd_dma_desc *cur_desc;
};

/* SPRD dma device */
struct sprd_dma_dev {
	struct dma_device dma_dev;
	void __iomem *glb_base;
	struct clk *clk;
	struct clk *ashb_clk;
	int irq;
	u32 total_chns;
	struct sprd_dma_chn channels[0];
};

static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param);
static struct of_dma_filter_info sprd_dma_info = {
	.filter_fn = sprd_dma_filter_fn,
};

static inline struct sprd_dma_chn *to_sprd_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct sprd_dma_chn, vc.chan);
}

static inline struct sprd_dma_dev *to_sprd_dma_dev(struct dma_chan *c)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(c);

	return container_of(schan, struct sprd_dma_dev, channels[c->chan_id]);
}

static inline struct sprd_dma_desc *to_sprd_dma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct sprd_dma_desc, vd);
}

static void sprd_dma_chn_update(struct sprd_dma_chn *schan, u32 reg,
				u32 mask, u32 val)
{
	u32 orig = readl(schan->chn_base + reg);
	u32 tmp;

	tmp = (orig & ~mask) | val;
	writel(tmp, schan->chn_base + reg);
}

static int sprd_dma_enable(struct sprd_dma_dev *sdev)
{
	int ret;

	ret = clk_prepare_enable(sdev->clk);
	if (ret)
		return ret;

	/*
	 * The ashb_clk is optional and only used by the AGCP DMA controller,
	 * so only enable it when it was acquired successfully.
	 */
	if (!IS_ERR(sdev->ashb_clk))
		ret = clk_prepare_enable(sdev->ashb_clk);

	return ret;
}

static void sprd_dma_disable(struct sprd_dma_dev *sdev)
{
	clk_disable_unprepare(sdev->clk);

	/* Also disable the optional ashb_clk used by AGCP DMA. */
	if (!IS_ERR(sdev->ashb_clk))
		clk_disable_unprepare(sdev->ashb_clk);
}

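/*
 * Note on request UIDs (an interpretation of the register writes below, not
 * taken from a datasheet): each hardware request line is identified by a UID,
 * and writing "channel number + 1" into the per-UID global register binds that
 * request line to the channel.  UID 0 (SPRD_DMA_SOFTWARE_UID) means no
 * hardware handshake; such channels are kicked off by sprd_dma_soft_request()
 * from sprd_dma_start() instead.
 */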
static void sprd_dma_set_uid(struct sprd_dma_chn *schan)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
	u32 dev_id = schan->dev_id;

	if (dev_id != SPRD_DMA_SOFTWARE_UID) {
		u32 uid_offset = SPRD_DMA_GLB_REQ_UID_OFFSET +
				 SPRD_DMA_GLB_REQ_UID(dev_id);

		writel(schan->chn_num + 1, sdev->glb_base + uid_offset);
	}
}

static void sprd_dma_unset_uid(struct sprd_dma_chn *schan)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
	u32 dev_id = schan->dev_id;

	if (dev_id != SPRD_DMA_SOFTWARE_UID) {
		u32 uid_offset = SPRD_DMA_GLB_REQ_UID_OFFSET +
				 SPRD_DMA_GLB_REQ_UID(dev_id);

		writel(0, sdev->glb_base + uid_offset);
	}
}

static void sprd_dma_clear_int(struct sprd_dma_chn *schan)
{
	sprd_dma_chn_update(schan, SPRD_DMA_CHN_INTC,
			    SPRD_DMA_INT_MASK << SPRD_DMA_INT_CLR_OFFSET,
			    SPRD_DMA_INT_MASK << SPRD_DMA_INT_CLR_OFFSET);
}

static void sprd_dma_enable_chn(struct sprd_dma_chn *schan)
{
	sprd_dma_chn_update(schan, SPRD_DMA_CHN_CFG, SPRD_DMA_CHN_EN,
			    SPRD_DMA_CHN_EN);
}

static void sprd_dma_disable_chn(struct sprd_dma_chn *schan)
{
	sprd_dma_chn_update(schan, SPRD_DMA_CHN_CFG, SPRD_DMA_CHN_EN, 0);
}

static void sprd_dma_soft_request(struct sprd_dma_chn *schan)
{
	sprd_dma_chn_update(schan, SPRD_DMA_CHN_REQ, SPRD_DMA_REQ_EN,
			    SPRD_DMA_REQ_EN);
}

static void sprd_dma_pause_resume(struct sprd_dma_chn *schan, bool enable)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
	u32 pause, timeout = SPRD_DMA_PAUSE_CNT;

	if (enable) {
		sprd_dma_chn_update(schan, SPRD_DMA_CHN_PAUSE,
				    SPRD_DMA_PAUSE_EN, SPRD_DMA_PAUSE_EN);

		do {
			pause = readl(schan->chn_base + SPRD_DMA_CHN_PAUSE);
			if (pause & SPRD_DMA_PAUSE_STS)
				break;

			cpu_relax();
		} while (--timeout > 0);

		if (!timeout)
			dev_warn(sdev->dma_dev.dev,
				 "pause dma controller timeout\n");
	} else {
		sprd_dma_chn_update(schan, SPRD_DMA_CHN_PAUSE,
				    SPRD_DMA_PAUSE_EN, 0);
	}
}

static void sprd_dma_stop_and_disable(struct sprd_dma_chn *schan)
{
	u32 cfg = readl(schan->chn_base + SPRD_DMA_CHN_CFG);

	if (!(cfg & SPRD_DMA_CHN_EN))
		return;

	sprd_dma_pause_resume(schan, true);
	sprd_dma_disable_chn(schan);
}

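/*
 * The destination address can be wider than 32 bits: the low 32 bits live in
 * SPRD_DMA_CHN_DES_ADDR and, judging from SPRD_DMA_HIGH_ADDR_MASK and
 * SPRD_DMA_HIGH_ADDR_OFFSET, bits [35:32] are kept in the top nibble of
 * SPRD_DMA_CHN_WARP_TO (the source address uses SPRD_DMA_CHN_WARP_PTR the
 * same way in sprd_dma_config()).  This helper reassembles the full address.
 */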
static unsigned long sprd_dma_get_dst_addr(struct sprd_dma_chn *schan)
{
	unsigned long addr, addr_high;

	addr = readl(schan->chn_base + SPRD_DMA_CHN_DES_ADDR);
	addr_high = readl(schan->chn_base + SPRD_DMA_CHN_WARP_TO) &
		    SPRD_DMA_HIGH_ADDR_MASK;

	return addr | (addr_high << SPRD_DMA_HIGH_ADDR_OFFSET);
}

static enum sprd_dma_int_type sprd_dma_get_int_type(struct sprd_dma_chn *schan)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
	u32 intc_sts = readl(schan->chn_base + SPRD_DMA_CHN_INTC) &
		       SPRD_DMA_CHN_INT_STS;

	switch (intc_sts) {
	case SPRD_DMA_CFGERR_INT_STS:
		return SPRD_DMA_CFGERR_INT;

	case SPRD_DMA_LIST_INT_STS:
		return SPRD_DMA_LIST_INT;

	case SPRD_DMA_TRSC_INT_STS:
		return SPRD_DMA_TRANS_INT;

	case SPRD_DMA_BLK_INT_STS:
		return SPRD_DMA_BLK_INT;

	case SPRD_DMA_FRAG_INT_STS:
		return SPRD_DMA_FRAG_INT;

	default:
		dev_warn(sdev->dma_dev.dev, "incorrect dma interrupt type\n");
		return SPRD_DMA_NO_INT;
	}
}

static enum sprd_dma_req_mode sprd_dma_get_req_type(struct sprd_dma_chn *schan)
{
	u32 frag_reg = readl(schan->chn_base + SPRD_DMA_CHN_FRG_LEN);

	return (frag_reg >> SPRD_DMA_REQ_MODE_OFFSET) & SPRD_DMA_REQ_MODE_MASK;
}

static void sprd_dma_set_chn_config(struct sprd_dma_chn *schan,
				    struct sprd_dma_desc *sdesc)
{
	struct sprd_dma_chn_hw *cfg = &sdesc->chn_hw;

	writel(cfg->pause, schan->chn_base + SPRD_DMA_CHN_PAUSE);
	writel(cfg->cfg, schan->chn_base + SPRD_DMA_CHN_CFG);
	writel(cfg->intc, schan->chn_base + SPRD_DMA_CHN_INTC);
	writel(cfg->src_addr, schan->chn_base + SPRD_DMA_CHN_SRC_ADDR);
	writel(cfg->des_addr, schan->chn_base + SPRD_DMA_CHN_DES_ADDR);
	writel(cfg->frg_len, schan->chn_base + SPRD_DMA_CHN_FRG_LEN);
	writel(cfg->blk_len, schan->chn_base + SPRD_DMA_CHN_BLK_LEN);
	writel(cfg->trsc_len, schan->chn_base + SPRD_DMA_CHN_TRSC_LEN);
	writel(cfg->trsf_step, schan->chn_base + SPRD_DMA_CHN_TRSF_STEP);
	writel(cfg->wrap_ptr, schan->chn_base + SPRD_DMA_CHN_WARP_PTR);
	writel(cfg->wrap_to, schan->chn_base + SPRD_DMA_CHN_WARP_TO);
	writel(cfg->llist_ptr, schan->chn_base + SPRD_DMA_CHN_LLIST_PTR);
	writel(cfg->frg_step, schan->chn_base + SPRD_DMA_CHN_FRAG_STEP);
	writel(cfg->src_blk_step, schan->chn_base + SPRD_DMA_CHN_SRC_BLK_STEP);
	writel(cfg->des_blk_step, schan->chn_base + SPRD_DMA_CHN_DES_BLK_STEP);
	writel(cfg->req, schan->chn_base + SPRD_DMA_CHN_REQ);
}

static void sprd_dma_start(struct sprd_dma_chn *schan)
{
	struct virt_dma_desc *vd = vchan_next_desc(&schan->vc);

	if (!vd)
		return;

	list_del(&vd->node);
	schan->cur_desc = to_sprd_dma_desc(vd);

	/*
	 * Copy the DMA configuration from DMA descriptor to this hardware
	 * channel.
	 */
	sprd_dma_set_chn_config(schan, schan->cur_desc);
	sprd_dma_set_uid(schan);
	sprd_dma_enable_chn(schan);

	if (schan->dev_id == SPRD_DMA_SOFTWARE_UID)
		sprd_dma_soft_request(schan);
}

static void sprd_dma_stop(struct sprd_dma_chn *schan)
{
	sprd_dma_stop_and_disable(schan);
	sprd_dma_unset_uid(schan);
	sprd_dma_clear_int(schan);
}

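/*
 * Decide whether the current descriptor is finished.  This leans on the
 * ordering of the two enums above: interrupt types are listed roughly in
 * increasing scope (fragment, block, transaction, list), so an interrupt type
 * of at least req_mode + 1 is treated as completion of the requested unit of
 * work, e.g. SPRD_DMA_BLK_REQ (1) completes on SPRD_DMA_BLK_INT (2) or any
 * later type.
 */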
static bool sprd_dma_check_trans_done(struct sprd_dma_desc *sdesc,
				      enum sprd_dma_int_type int_type,
				      enum sprd_dma_req_mode req_mode)
{
	if (int_type == SPRD_DMA_NO_INT)
		return false;

	if (int_type >= req_mode + 1)
		return true;
	else
		return false;
}

static irqreturn_t dma_irq_handle(int irq, void *dev_id)
{
	struct sprd_dma_dev *sdev = (struct sprd_dma_dev *)dev_id;
	u32 irq_status = readl(sdev->glb_base + SPRD_DMA_GLB_INT_MSK_STS);
	struct sprd_dma_chn *schan;
	struct sprd_dma_desc *sdesc;
	enum sprd_dma_req_mode req_type;
	enum sprd_dma_int_type int_type;
	bool trans_done = false;
	u32 i;

	while (irq_status) {
		i = __ffs(irq_status);
		irq_status &= (irq_status - 1);
		schan = &sdev->channels[i];

		spin_lock(&schan->vc.lock);
		int_type = sprd_dma_get_int_type(schan);
		req_type = sprd_dma_get_req_type(schan);
		sprd_dma_clear_int(schan);

		sdesc = schan->cur_desc;

		/* Check if the dma request descriptor is done. */
		trans_done = sprd_dma_check_trans_done(sdesc, int_type,
						       req_type);
		if (trans_done) {
			vchan_cookie_complete(&sdesc->vd);
			schan->cur_desc = NULL;
			sprd_dma_start(schan);
		}
		spin_unlock(&schan->vc.lock);
	}

	return IRQ_HANDLED;
}

static int sprd_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	int ret;

	ret = pm_runtime_get_sync(chan->device->dev);
	if (ret < 0)
		return ret;

	schan->dev_id = SPRD_DMA_SOFTWARE_UID;
	return 0;
}

static void sprd_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->vc.lock, flags);
	sprd_dma_stop(schan);
	spin_unlock_irqrestore(&schan->vc.lock, flags);

	vchan_free_chan_resources(&schan->vc);
	pm_runtime_put(chan->device->dev);
}

static enum dma_status sprd_dma_tx_status(struct dma_chan *chan,
					  dma_cookie_t cookie,
					  struct dma_tx_state *txstate)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	u32 pos;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&schan->vc.lock, flags);
	vd = vchan_find_desc(&schan->vc, cookie);
	if (vd) {
		struct sprd_dma_desc *sdesc = to_sprd_dma_desc(vd);
		struct sprd_dma_chn_hw *hw = &sdesc->chn_hw;

		if (hw->trsc_len > 0)
			pos = hw->trsc_len;
		else if (hw->blk_len > 0)
			pos = hw->blk_len;
		else if (hw->frg_len > 0)
			pos = hw->frg_len;
		else
			pos = 0;
	} else if (schan->cur_desc && schan->cur_desc->vd.tx.cookie == cookie) {
		pos = sprd_dma_get_dst_addr(schan);
	} else {
		pos = 0;
	}
	spin_unlock_irqrestore(&schan->vc.lock, flags);

	dma_set_residue(txstate, pos);
	return ret;
}

static void sprd_dma_issue_pending(struct dma_chan *chan)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->vc.lock, flags);
	if (vchan_issue_pending(&schan->vc) && !schan->cur_desc)
		sprd_dma_start(schan);
	spin_unlock_irqrestore(&schan->vc.lock, flags);
}

static int sprd_dma_config(struct dma_chan *chan, struct sprd_dma_desc *sdesc,
			   dma_addr_t dest, dma_addr_t src, size_t len)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(chan);
	struct sprd_dma_chn_hw *hw = &sdesc->chn_hw;
	u32 datawidth, src_step, des_step, fragment_len;
	u32 block_len, req_mode, irq_mode, transcation_len;
	u32 fix_mode = 0, fix_en = 0;

	if (IS_ALIGNED(len, 4)) {
		datawidth = 2;
		src_step = SPRD_DMA_WORD_STEP;
		des_step = SPRD_DMA_WORD_STEP;
	} else if (IS_ALIGNED(len, 2)) {
		datawidth = 1;
		src_step = SPRD_DMA_SHORT_STEP;
		des_step = SPRD_DMA_SHORT_STEP;
	} else {
		datawidth = 0;
		src_step = SPRD_DMA_BYTE_STEP;
		des_step = SPRD_DMA_BYTE_STEP;
	}

	fragment_len = SPRD_DMA_MEMCPY_MIN_SIZE;
	if (len <= SPRD_DMA_BLK_LEN_MASK) {
		block_len = len;
		transcation_len = 0;
		req_mode = SPRD_DMA_BLK_REQ;
		irq_mode = SPRD_DMA_BLK_INT;
	} else {
		block_len = SPRD_DMA_MEMCPY_MIN_SIZE;
		transcation_len = len;
		req_mode = SPRD_DMA_TRANS_REQ;
		irq_mode = SPRD_DMA_TRANS_INT;
	}

	hw->cfg = SPRD_DMA_DONOT_WAIT_BDONE << SPRD_DMA_WAIT_BDONE_OFFSET;
	hw->wrap_ptr = (u32)((src >> SPRD_DMA_HIGH_ADDR_OFFSET) &
			     SPRD_DMA_HIGH_ADDR_MASK);
	hw->wrap_to = (u32)((dest >> SPRD_DMA_HIGH_ADDR_OFFSET) &
			    SPRD_DMA_HIGH_ADDR_MASK);

	hw->src_addr = (u32)(src & SPRD_DMA_LOW_ADDR_MASK);
	hw->des_addr = (u32)(dest & SPRD_DMA_LOW_ADDR_MASK);

	if ((src_step != 0 && des_step != 0) || (src_step | des_step) == 0) {
		fix_en = 0;
	} else {
		fix_en = 1;
		if (src_step)
			fix_mode = 1;
		else
			fix_mode = 0;
	}

	hw->frg_len = datawidth << SPRD_DMA_SRC_DATAWIDTH_OFFSET |
		      datawidth << SPRD_DMA_DES_DATAWIDTH_OFFSET |
		      req_mode << SPRD_DMA_REQ_MODE_OFFSET |
		      fix_mode << SPRD_DMA_FIX_SEL_OFFSET |
		      fix_en << SPRD_DMA_FIX_EN_OFFSET |
		      (fragment_len & SPRD_DMA_FRG_LEN_MASK);
	hw->blk_len = block_len & SPRD_DMA_BLK_LEN_MASK;

	hw->intc = SPRD_DMA_CFG_ERR_INT_EN;

	switch (irq_mode) {
	case SPRD_DMA_NO_INT:
		break;

	case SPRD_DMA_FRAG_INT:
		hw->intc |= SPRD_DMA_FRAG_INT_EN;
		break;

	case SPRD_DMA_BLK_INT:
		hw->intc |= SPRD_DMA_BLK_INT_EN;
		break;

	case SPRD_DMA_BLK_FRAG_INT:
		hw->intc |= SPRD_DMA_BLK_INT_EN | SPRD_DMA_FRAG_INT_EN;
		break;

	case SPRD_DMA_TRANS_INT:
		hw->intc |= SPRD_DMA_TRANS_INT_EN;
		break;

	case SPRD_DMA_TRANS_FRAG_INT:
		hw->intc |= SPRD_DMA_TRANS_INT_EN | SPRD_DMA_FRAG_INT_EN;
		break;

	case SPRD_DMA_TRANS_BLK_INT:
		hw->intc |= SPRD_DMA_TRANS_INT_EN | SPRD_DMA_BLK_INT_EN;
		break;

	case SPRD_DMA_LIST_INT:
		hw->intc |= SPRD_DMA_LIST_INT_EN;
		break;

	case SPRD_DMA_CFGERR_INT:
		hw->intc |= SPRD_DMA_CFG_ERR_INT_EN;
		break;

	default:
		dev_err(sdev->dma_dev.dev, "invalid irq mode\n");
		return -EINVAL;
	}

	if (transcation_len == 0)
		hw->trsc_len = block_len & SPRD_DMA_TRSC_LEN_MASK;
	else
		hw->trsc_len = transcation_len & SPRD_DMA_TRSC_LEN_MASK;

	hw->trsf_step = (des_step & SPRD_DMA_TRSF_STEP_MASK) <<
			SPRD_DMA_DEST_TRSF_STEP_OFFSET |
			(src_step & SPRD_DMA_TRSF_STEP_MASK) <<
			SPRD_DMA_SRC_TRSF_STEP_OFFSET;

	hw->frg_step = 0;
	hw->src_blk_step = 0;
	hw->des_blk_step = 0;
	return 0;
}

static struct dma_async_tx_descriptor *
sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
			 size_t len, unsigned long flags)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	struct sprd_dma_desc *sdesc;
	int ret;

	sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
	if (!sdesc)
		return NULL;

	ret = sprd_dma_config(chan, sdesc, dest, src, len);
	if (ret) {
		kfree(sdesc);
		return NULL;
	}

	return vchan_tx_prep(&schan->vc, &sdesc->vd, flags);
}

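/*
 * Minimal sketch (not part of this driver) of how a generic dmaengine client
 * could exercise the memcpy path above.  Error handling is omitted, and dst,
 * src and len are assumed to be DMA addresses/sizes the caller prepared
 * elsewhere:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *				       DMA_PREP_INTERRUPT);
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 *	...
 *	dma_release_channel(chan);
 */
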
static int sprd_dma_pause(struct dma_chan *chan)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->vc.lock, flags);
	sprd_dma_pause_resume(schan, true);
	spin_unlock_irqrestore(&schan->vc.lock, flags);

	return 0;
}

static int sprd_dma_resume(struct dma_chan *chan)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->vc.lock, flags);
	sprd_dma_pause_resume(schan, false);
	spin_unlock_irqrestore(&schan->vc.lock, flags);

	return 0;
}

static int sprd_dma_terminate_all(struct dma_chan *chan)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&schan->vc.lock, flags);
	sprd_dma_stop(schan);

	vchan_get_all_descriptors(&schan->vc, &head);
	spin_unlock_irqrestore(&schan->vc.lock, flags);

	vchan_dma_desc_free_list(&schan->vc, &head);
	return 0;
}

static void sprd_dma_free_desc(struct virt_dma_desc *vd)
{
	struct sprd_dma_desc *sdesc = to_sprd_dma_desc(vd);

	kfree(sdesc);
}

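/*
 * Channel filter used by of_dma_simple_xlate() (registered in probe below).
 * As far as that xlate path goes, the first cell of a consumer's "dmas"
 * specifier is handed over here as @param, and a value of N selects hardware
 * channel N - 1; the exact meaning of that cell is defined by the DT binding
 * rather than by this function.
 */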
static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
	u32 req = *(u32 *)param;

	if (req < sdev->total_chns)
		return req == schan->chn_num + 1;
	else
		return false;
}

static int sprd_dma_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct sprd_dma_dev *sdev;
	struct sprd_dma_chn *dma_chn;
	struct resource *res;
	u32 chn_count;
	int ret, i;

	ret = device_property_read_u32(&pdev->dev, "#dma-channels", &chn_count);
	if (ret) {
		dev_err(&pdev->dev, "get dma channels count failed\n");
		return ret;
	}

	sdev = devm_kzalloc(&pdev->dev, sizeof(*sdev) +
			    sizeof(*dma_chn) * chn_count,
			    GFP_KERNEL);
	if (!sdev)
		return -ENOMEM;

	sdev->clk = devm_clk_get(&pdev->dev, "enable");
	if (IS_ERR(sdev->clk)) {
		dev_err(&pdev->dev, "get enable clock failed\n");
		return PTR_ERR(sdev->clk);
	}

	/* ashb clock is optional for AGCP DMA */
	sdev->ashb_clk = devm_clk_get(&pdev->dev, "ashb_eb");
	if (IS_ERR(sdev->ashb_clk))
		dev_warn(&pdev->dev, "no optional ashb eb clock\n");

	/*
	 * There are three DMA controllers: AP DMA, AON DMA and AGCP DMA. The
	 * AGCP DMA controller may be used without requesting its irq, which
	 * saves power because DMA interrupts then never wake the system.
	 * Thus the DMA interrupts property should be optional.
	 */
	sdev->irq = platform_get_irq(pdev, 0);
	if (sdev->irq > 0) {
		ret = devm_request_irq(&pdev->dev, sdev->irq, dma_irq_handle,
				       0, "sprd_dma", (void *)sdev);
		if (ret < 0) {
			dev_err(&pdev->dev, "request dma irq failed\n");
			return ret;
		}
	} else {
		dev_warn(&pdev->dev, "no interrupts for the dma controller\n");
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	sdev->glb_base = devm_ioremap_nocache(&pdev->dev, res->start,
					      resource_size(res));
	if (!sdev->glb_base)
		return -ENOMEM;

	dma_cap_set(DMA_MEMCPY, sdev->dma_dev.cap_mask);
	sdev->total_chns = chn_count;
	sdev->dma_dev.chancnt = chn_count;
	INIT_LIST_HEAD(&sdev->dma_dev.channels);
	INIT_LIST_HEAD(&sdev->dma_dev.global_node);
	sdev->dma_dev.dev = &pdev->dev;
	sdev->dma_dev.device_alloc_chan_resources = sprd_dma_alloc_chan_resources;
	sdev->dma_dev.device_free_chan_resources = sprd_dma_free_chan_resources;
	sdev->dma_dev.device_tx_status = sprd_dma_tx_status;
	sdev->dma_dev.device_issue_pending = sprd_dma_issue_pending;
	sdev->dma_dev.device_prep_dma_memcpy = sprd_dma_prep_dma_memcpy;
	sdev->dma_dev.device_pause = sprd_dma_pause;
	sdev->dma_dev.device_resume = sprd_dma_resume;
	sdev->dma_dev.device_terminate_all = sprd_dma_terminate_all;

	for (i = 0; i < chn_count; i++) {
		dma_chn = &sdev->channels[i];
		dma_chn->chn_num = i;
		dma_chn->cur_desc = NULL;
		/* get each channel's registers base address. */
		dma_chn->chn_base = sdev->glb_base + SPRD_DMA_CHN_REG_OFFSET +
				    SPRD_DMA_CHN_REG_LENGTH * i;

		dma_chn->vc.desc_free = sprd_dma_free_desc;
		vchan_init(&dma_chn->vc, &sdev->dma_dev);
	}

	platform_set_drvdata(pdev, sdev);
	ret = sprd_dma_enable(sdev);
	if (ret)
		return ret;

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0)
		goto err_rpm;

	ret = dma_async_device_register(&sdev->dma_dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "register dma device failed:%d\n", ret);
		goto err_register;
	}

	sprd_dma_info.dma_cap = sdev->dma_dev.cap_mask;
	ret = of_dma_controller_register(np, of_dma_simple_xlate,
					 &sprd_dma_info);
	if (ret)
		goto err_of_register;

	pm_runtime_put(&pdev->dev);
	return 0;

err_of_register:
	dma_async_device_unregister(&sdev->dma_dev);
err_register:
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
err_rpm:
	sprd_dma_disable(sdev);
	return ret;
}

static int sprd_dma_remove(struct platform_device *pdev)
{
	struct sprd_dma_dev *sdev = platform_get_drvdata(pdev);
	struct sprd_dma_chn *c, *cn;
	int ret;

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0)
		return ret;

	/* explicitly free the irq */
	if (sdev->irq > 0)
		devm_free_irq(&pdev->dev, sdev->irq, sdev);

	list_for_each_entry_safe(c, cn, &sdev->dma_dev.channels,
				 vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
	}

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&sdev->dma_dev);
	sprd_dma_disable(sdev);

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

static const struct of_device_id sprd_dma_match[] = {
	{ .compatible = "sprd,sc9860-dma", },
	{},
};

static int __maybe_unused sprd_dma_runtime_suspend(struct device *dev)
{
	struct sprd_dma_dev *sdev = dev_get_drvdata(dev);

	sprd_dma_disable(sdev);
	return 0;
}

static int __maybe_unused sprd_dma_runtime_resume(struct device *dev)
{
	struct sprd_dma_dev *sdev = dev_get_drvdata(dev);
	int ret;

	ret = sprd_dma_enable(sdev);
	if (ret)
		dev_err(sdev->dma_dev.dev, "enable dma failed\n");

	return ret;
}

static const struct dev_pm_ops sprd_dma_pm_ops = {
	SET_RUNTIME_PM_OPS(sprd_dma_runtime_suspend,
			   sprd_dma_runtime_resume,
			   NULL)
};

static struct platform_driver sprd_dma_driver = {
	.probe = sprd_dma_probe,
	.remove = sprd_dma_remove,
	.driver = {
		.name = "sprd-dma",
		.of_match_table = sprd_dma_match,
		.pm = &sprd_dma_pm_ops,
	},
};
module_platform_driver(sprd_dma_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DMA driver for Spreadtrum");
MODULE_AUTHOR("Baolin Wang <baolin.wang@spreadtrum.com>");
MODULE_ALIAS("platform:sprd-dma");