1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
4 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
5 */
6
7#include <linux/kernel.h>
8#include <linux/dmaengine.h>
9#include <linux/dma-mapping.h>
10#include <linux/dmapool.h>
11#include <linux/err.h>
12#include <linux/init.h>
13#include <linux/interrupt.h>
14#include <linux/list.h>
15#include <linux/platform_device.h>
16#include <linux/slab.h>
17#include <linux/spinlock.h>
18#include <linux/of.h>
19#include <linux/of_dma.h>
20#include <linux/of_device.h>
21#include <linux/of_irq.h>
22#include <linux/workqueue.h>
23#include <linux/completion.h>
24#include <linux/soc/ti/k3-ringacc.h>
25#include <linux/soc/ti/ti_sci_protocol.h>
26#include <linux/soc/ti/ti_sci_inta_msi.h>
27#include <linux/dma/ti-cppi5.h>
28
29#include "../virt-dma.h"
30#include "k3-udma.h"
31#include "k3-psil-priv.h"
32
33struct udma_static_tr {
34 u8 elsize; /* RPSTR0 */
35 u16 elcnt; /* RPSTR0 */
36 u16 bstcnt; /* RPSTR1 */
37};
38
39#define K3_UDMA_MAX_RFLOWS 1024
40#define K3_UDMA_DEFAULT_RING_SIZE 16
41
42/* How SRC/DST tag should be updated by UDMA in the descriptor's Word 3 */
43#define UDMA_RFLOW_SRCTAG_NONE 0
44#define UDMA_RFLOW_SRCTAG_CFG_TAG 1
45#define UDMA_RFLOW_SRCTAG_FLOW_ID 2
46#define UDMA_RFLOW_SRCTAG_SRC_TAG 4
47
48#define UDMA_RFLOW_DSTTAG_NONE 0
49#define UDMA_RFLOW_DSTTAG_CFG_TAG 1
50#define UDMA_RFLOW_DSTTAG_FLOW_ID 2
51#define UDMA_RFLOW_DSTTAG_DST_TAG_LO 4
52#define UDMA_RFLOW_DSTTAG_DST_TAG_HI 5
53
54struct udma_chan;
55
56enum udma_mmr {
57 MMR_GCFG = 0,
58 MMR_RCHANRT,
59 MMR_TCHANRT,
60 MMR_LAST,
61};
62
63static const char * const mmr_names[] = { "gcfg", "rchanrt", "tchanrt" };
64
65struct udma_tchan {
66 void __iomem *reg_rt;
67
68 int id;
69 struct k3_ring *t_ring; /* Transmit ring */
70 struct k3_ring *tc_ring; /* Transmit Completion ring */
71};
72
73struct udma_rflow {
74 int id;
75 struct k3_ring *fd_ring; /* Free Descriptor ring */
76 struct k3_ring *r_ring; /* Receive ring */
77};
78
79struct udma_rchan {
80 void __iomem *reg_rt;
81
82 int id;
83};
84
85#define UDMA_FLAG_PDMA_ACC32 BIT(0)
86#define UDMA_FLAG_PDMA_BURST BIT(1)
87
88struct udma_match_data {
89 u32 psil_base;
90 bool enable_memcpy_support;
91 u32 flags;
92 u32 statictr_z_mask;
93 u32 rchan_oes_offset;
94
95 u8 tpl_levels;
96 u32 level_start_idx[];
97};
98
99struct udma_dev {
100 struct dma_device ddev;
101 struct device *dev;
102 void __iomem *mmrs[MMR_LAST];
103 const struct udma_match_data *match_data;
104
105 size_t desc_align; /* alignment to use for descriptors */
106
107 struct udma_tisci_rm tisci_rm;
108
109 struct k3_ringacc *ringacc;
110
111 struct work_struct purge_work;
112 struct list_head desc_to_purge;
113 spinlock_t lock;
114
115 int tchan_cnt;
116 int echan_cnt;
117 int rchan_cnt;
118 int rflow_cnt;
119 unsigned long *tchan_map;
120 unsigned long *rchan_map;
121 unsigned long *rflow_gp_map;
122 unsigned long *rflow_gp_map_allocated;
123 unsigned long *rflow_in_use;
124
125 struct udma_tchan *tchans;
126 struct udma_rchan *rchans;
127 struct udma_rflow *rflows;
128
129 struct udma_chan *channels;
130 u32 psil_base;
131 u32 atype;
132};
133
134struct udma_hwdesc {
135 size_t cppi5_desc_size;
136 void *cppi5_desc_vaddr;
137 dma_addr_t cppi5_desc_paddr;
138
139 /* TR descriptor internal pointers */
140 void *tr_req_base;
141 struct cppi5_tr_resp_t *tr_resp_base;
142};
143
144struct udma_desc {
145 struct virt_dma_desc vd;
146
147 bool terminated;
148
149 enum dma_transfer_direction dir;
150
151 struct udma_static_tr static_tr;
152 u32 residue;
153
154 unsigned int sglen;
155 unsigned int desc_idx; /* Only used for cyclic in packet mode */
156 unsigned int tr_idx;
157
158 u32 metadata_size;
159 void *metadata; /* pointer to provided metadata buffer (EPIP, PSdata) */
160
161 unsigned int hwdesc_count;
162 struct udma_hwdesc hwdesc[0];
163};
164
165enum udma_chan_state {
166 UDMA_CHAN_IS_IDLE = 0, /* not active, no teardown is in progress */
167 UDMA_CHAN_IS_ACTIVE, /* Normal operation */
168 UDMA_CHAN_IS_TERMINATING, /* channel is being terminated */
169};
170
171struct udma_tx_drain {
172 struct delayed_work work;
173 unsigned long jiffie;
174 u32 residue;
175};
176
177struct udma_chan_config {
178 bool pkt_mode; /* TR or packet */
179 bool needs_epib; /* EPIB is needed for the communication or not */
180 u32 psd_size; /* size of Protocol Specific Data */
181 u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
182 u32 hdesc_size; /* Size of a packet descriptor in packet mode */
183 bool notdpkt; /* Suppress sending TDC packet */
184 int remote_thread_id;
185 u32 atype;
186 u32 src_thread;
187 u32 dst_thread;
188 enum psil_endpoint_type ep_type;
189 bool enable_acc32;
190 bool enable_burst;
191 enum udma_tp_level channel_tpl; /* Channel Throughput Level */
192
193 enum dma_transfer_direction dir;
194};
195
196struct udma_chan {
197 struct virt_dma_chan vc;
198 struct dma_slave_config cfg;
199 struct udma_dev *ud;
200 struct udma_desc *desc;
201 struct udma_desc *terminated_desc;
202 struct udma_static_tr static_tr;
203 char *name;
204
205 struct udma_tchan *tchan;
206 struct udma_rchan *rchan;
207 struct udma_rflow *rflow;
208
209 bool psil_paired;
210
211 int irq_num_ring;
212 int irq_num_udma;
213
214 bool cyclic;
215 bool paused;
216
217 enum udma_chan_state state;
218 struct completion teardown_completed;
219
220 struct udma_tx_drain tx_drain;
221
222 u32 bcnt; /* number of bytes completed since the start of the channel */
223 u32 in_ring_cnt; /* number of descriptors in flight */
224
225 /* Channel configuration parameters */
226 struct udma_chan_config config;
227
228 /* dmapool for packet mode descriptors */
229 bool use_dma_pool;
230 struct dma_pool *hdesc_pool;
231
232 u32 id;
233};
234
235static inline struct udma_dev *to_udma_dev(struct dma_device *d)
236{
237 return container_of(d, struct udma_dev, ddev);
238}
239
240static inline struct udma_chan *to_udma_chan(struct dma_chan *c)
241{
242 return container_of(c, struct udma_chan, vc.chan);
243}
244
245static inline struct udma_desc *to_udma_desc(struct dma_async_tx_descriptor *t)
246{
247 return container_of(t, struct udma_desc, vd.tx);
248}
249
250/* Generic register access functions */
251static inline u32 udma_read(void __iomem *base, int reg)
252{
253 return readl(base + reg);
254}
255
256static inline void udma_write(void __iomem *base, int reg, u32 val)
257{
258 writel(val, base + reg);
259}
260
261static inline void udma_update_bits(void __iomem *base, int reg,
262 u32 mask, u32 val)
263{
264 u32 tmp, orig;
265
266 orig = readl(base + reg);
267 tmp = orig & ~mask;
268 tmp |= (val & mask);
269
270 if (tmp != orig)
271 writel(tmp, base + reg);
272}
273
274/* TCHANRT */
275static inline u32 udma_tchanrt_read(struct udma_tchan *tchan, int reg)
276{
277 if (!tchan)
278 return 0;
279 return udma_read(tchan->reg_rt, reg);
280}
281
282static inline void udma_tchanrt_write(struct udma_tchan *tchan, int reg,
283 u32 val)
284{
285 if (!tchan)
286 return;
287 udma_write(tchan->reg_rt, reg, val);
288}
289
290static inline void udma_tchanrt_update_bits(struct udma_tchan *tchan, int reg,
291 u32 mask, u32 val)
292{
293 if (!tchan)
294 return;
295 udma_update_bits(tchan->reg_rt, reg, mask, val);
296}
297
298/* RCHANRT */
299static inline u32 udma_rchanrt_read(struct udma_rchan *rchan, int reg)
300{
301 if (!rchan)
302 return 0;
303 return udma_read(rchan->reg_rt, reg);
304}
305
306static inline void udma_rchanrt_write(struct udma_rchan *rchan, int reg,
307 u32 val)
308{
309 if (!rchan)
310 return;
311 udma_write(rchan->reg_rt, reg, val);
312}
313
314static inline void udma_rchanrt_update_bits(struct udma_rchan *rchan, int reg,
315 u32 mask, u32 val)
316{
317 if (!rchan)
318 return;
319 udma_update_bits(rchan->reg_rt, reg, mask, val);
320}
321
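/*
 * Pair/unpair PSI-L threads through the TI-SCI resource manager. The
 * destination thread is marked by setting K3_PSIL_DST_THREAD_ID_OFFSET
 * before the request is sent for the NAVSS device.
 */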
322static int navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
323{
324 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
325
326 dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
327 return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
328 tisci_rm->tisci_navss_dev_id,
329 src_thread, dst_thread);
330}
331
332static int navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
333 u32 dst_thread)
334{
335 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
336
337 dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
338 return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
339 tisci_rm->tisci_navss_dev_id,
340 src_thread, dst_thread);
341}
342
343static void udma_reset_uchan(struct udma_chan *uc)
344{
345 memset(&uc->config, 0, sizeof(uc->config));
346 uc->config.remote_thread_id = -1;
347 uc->state = UDMA_CHAN_IS_IDLE;
348}
349
350static void udma_dump_chan_stdata(struct udma_chan *uc)
351{
352 struct device *dev = uc->ud->dev;
353 u32 offset;
354 int i;
355
356 if (uc->config.dir == DMA_MEM_TO_DEV || uc->config.dir == DMA_MEM_TO_MEM) {
357 dev_dbg(dev, "TCHAN State data:\n");
358 for (i = 0; i < 32; i++) {
359 offset = UDMA_TCHAN_RT_STDATA_REG + i * 4;
360 dev_dbg(dev, "TRT_STDATA[%02d]: 0x%08x\n", i,
361 udma_tchanrt_read(uc->tchan, offset));
362 }
363 }
364
365 if (uc->config.dir == DMA_DEV_TO_MEM || uc->config.dir == DMA_MEM_TO_MEM) {
366 dev_dbg(dev, "RCHAN State data:\n");
367 for (i = 0; i < 32; i++) {
368 offset = UDMA_RCHAN_RT_STDATA_REG + i * 4;
369 dev_dbg(dev, "RRT_STDATA[%02d]: 0x%08x\n", i,
370 udma_rchanrt_read(uc->rchan, offset));
371 }
372 }
373}
374
375static inline dma_addr_t udma_curr_cppi5_desc_paddr(struct udma_desc *d,
376 int idx)
377{
378 return d->hwdesc[idx].cppi5_desc_paddr;
379}
380
381static inline void *udma_curr_cppi5_desc_vaddr(struct udma_desc *d, int idx)
382{
383 return d->hwdesc[idx].cppi5_desc_vaddr;
384}
385
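/*
 * Look up the descriptor belonging to a physical address popped from a
 * ring: check the terminated descriptor first, then the currently active
 * one. Returns NULL if the address matches neither.
 */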
386static struct udma_desc *udma_udma_desc_from_paddr(struct udma_chan *uc,
387 dma_addr_t paddr)
388{
389 struct udma_desc *d = uc->terminated_desc;
390
391 if (d) {
392 dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
393 d->desc_idx);
394
395 if (desc_paddr != paddr)
396 d = NULL;
397 }
398
399 if (!d) {
400 d = uc->desc;
401 if (d) {
402 dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
403 d->desc_idx);
404
405 if (desc_paddr != paddr)
406 d = NULL;
407 }
408 }
409
410 return d;
411}
412
413static void udma_free_hwdesc(struct udma_chan *uc, struct udma_desc *d)
414{
415 if (uc->use_dma_pool) {
416 int i;
417
418 for (i = 0; i < d->hwdesc_count; i++) {
419 if (!d->hwdesc[i].cppi5_desc_vaddr)
420 continue;
421
422 dma_pool_free(uc->hdesc_pool,
423 d->hwdesc[i].cppi5_desc_vaddr,
424 d->hwdesc[i].cppi5_desc_paddr);
425
426 d->hwdesc[i].cppi5_desc_vaddr = NULL;
427 }
428 } else if (d->hwdesc[0].cppi5_desc_vaddr) {
429 struct udma_dev *ud = uc->ud;
430
431 dma_free_coherent(ud->dev, d->hwdesc[0].cppi5_desc_size,
432 d->hwdesc[0].cppi5_desc_vaddr,
433 d->hwdesc[0].cppi5_desc_paddr);
434
435 d->hwdesc[0].cppi5_desc_vaddr = NULL;
436 }
437}
438
439static void udma_purge_desc_work(struct work_struct *work)
440{
441 struct udma_dev *ud = container_of(work, typeof(*ud), purge_work);
442 struct virt_dma_desc *vd, *_vd;
443 unsigned long flags;
444 LIST_HEAD(head);
445
446 spin_lock_irqsave(&ud->lock, flags);
447 list_splice_tail_init(&ud->desc_to_purge, &head);
448 spin_unlock_irqrestore(&ud->lock, flags);
449
450 list_for_each_entry_safe(vd, _vd, &head, node) {
451 struct udma_chan *uc = to_udma_chan(vd->tx.chan);
452 struct udma_desc *d = to_udma_desc(&vd->tx);
453
454 udma_free_hwdesc(uc, d);
455 list_del(&vd->node);
456 kfree(d);
457 }
458
459 /* If more to purge, schedule the work again */
460 if (!list_empty(&ud->desc_to_purge))
461 schedule_work(&ud->purge_work);
462}
463
464static void udma_desc_free(struct virt_dma_desc *vd)
465{
466 struct udma_dev *ud = to_udma_dev(vd->tx.chan->device);
467 struct udma_chan *uc = to_udma_chan(vd->tx.chan);
468 struct udma_desc *d = to_udma_desc(&vd->tx);
469 unsigned long flags;
470
471 if (uc->terminated_desc == d)
472 uc->terminated_desc = NULL;
473
474 if (uc->use_dma_pool) {
475 udma_free_hwdesc(uc, d);
476 kfree(d);
477 return;
478 }
479
480 spin_lock_irqsave(&ud->lock, flags);
481 list_add_tail(&vd->node, &ud->desc_to_purge);
482 spin_unlock_irqrestore(&ud->lock, flags);
483
484 schedule_work(&ud->purge_work);
485}
486
487static bool udma_is_chan_running(struct udma_chan *uc)
488{
489 u32 trt_ctl = 0;
490 u32 rrt_ctl = 0;
491
492 if (uc->tchan)
493 trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
494 if (uc->rchan)
495 rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
496
497 if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
498 return true;
499
500 return false;
501}
502
503static bool udma_is_chan_paused(struct udma_chan *uc)
504{
505 u32 val, pause_mask;
506
507 switch (uc->desc->dir) {
508 case DMA_DEV_TO_MEM:
509 val = udma_rchanrt_read(uc->rchan,
510 UDMA_RCHAN_RT_PEER_RT_EN_REG);
511 pause_mask = UDMA_PEER_RT_EN_PAUSE;
512 break;
513 case DMA_MEM_TO_DEV:
514 val = udma_tchanrt_read(uc->tchan,
515 UDMA_TCHAN_RT_PEER_RT_EN_REG);
516 pause_mask = UDMA_PEER_RT_EN_PAUSE;
517 break;
518 case DMA_MEM_TO_MEM:
519 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
520 pause_mask = UDMA_CHAN_RT_CTL_PAUSE;
521 break;
522 default:
523 return false;
524 }
525
526 if (val & pause_mask)
527 return true;
528
529 return false;
530}
531
532static void udma_sync_for_device(struct udma_chan *uc, int idx)
533{
534 struct udma_desc *d = uc->desc;
535
536 if (uc->cyclic && uc->config.pkt_mode) {
537 dma_sync_single_for_device(uc->ud->dev,
538 d->hwdesc[idx].cppi5_desc_paddr,
539 d->hwdesc[idx].cppi5_desc_size,
540 DMA_TO_DEVICE);
541 } else {
542 int i;
543
544 for (i = 0; i < d->hwdesc_count; i++) {
545 if (!d->hwdesc[i].cppi5_desc_vaddr)
546 continue;
547
548 dma_sync_single_for_device(uc->ud->dev,
549 d->hwdesc[i].cppi5_desc_paddr,
550 d->hwdesc[i].cppi5_desc_size,
551 DMA_TO_DEVICE);
552 }
553 }
554}
555
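/*
 * Queue the idx-th hardware descriptor of the current udma_desc to the
 * appropriate ring (RX free descriptor ring or TX ring). The descriptor
 * memory is synced for the device and a write barrier is issued before
 * the physical address is pushed; in_ring_cnt tracks how many
 * descriptors are in flight.
 */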
556static int udma_push_to_ring(struct udma_chan *uc, int idx)
557{
558 struct udma_desc *d = uc->desc;
559
560 struct k3_ring *ring = NULL;
561 int ret = -EINVAL;
562
563 switch (uc->config.dir) {
564 case DMA_DEV_TO_MEM:
565 ring = uc->rflow->fd_ring;
566 break;
567 case DMA_MEM_TO_DEV:
568 case DMA_MEM_TO_MEM:
569 ring = uc->tchan->t_ring;
570 break;
571 default:
572 break;
573 }
574
575 if (ring) {
576 dma_addr_t desc_addr = udma_curr_cppi5_desc_paddr(d, idx);
577
578 wmb(); /* Ensure that writes are not moved over this point */
579 udma_sync_for_device(uc, idx);
580 ret = k3_ringacc_ring_push(ring, &desc_addr);
581 uc->in_ring_cnt++;
582 }
583
584 return ret;
585}
586
587static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
588{
589 struct k3_ring *ring = NULL;
590 int ret = -ENOENT;
591
592 switch (uc->config.dir) {
593 case DMA_DEV_TO_MEM:
594 ring = uc->rflow->r_ring;
595 break;
596 case DMA_MEM_TO_DEV:
597 case DMA_MEM_TO_MEM:
598 ring = uc->tchan->tc_ring;
599 break;
600 default:
601 break;
602 }
603
604 if (ring && k3_ringacc_ring_get_occ(ring)) {
605 struct udma_desc *d = NULL;
606
607 ret = k3_ringacc_ring_pop(ring, addr);
608 if (ret)
609 return ret;
610
611 /* Teardown completion */
612 if (cppi5_desc_is_tdcm(*addr))
613 return ret;
614
615 d = udma_udma_desc_from_paddr(uc, *addr);
616
617 if (d)
618 dma_sync_single_for_cpu(uc->ud->dev, *addr,
619 d->hwdesc[0].cppi5_desc_size,
620 DMA_FROM_DEVICE);
621 rmb(); /* Ensure that reads are not moved before this point */
622
623 if (!ret)
624 uc->in_ring_cnt--;
625 }
626
627 return ret;
628}
629
630static void udma_reset_rings(struct udma_chan *uc)
631{
632 struct k3_ring *ring1 = NULL;
633 struct k3_ring *ring2 = NULL;
634
635 switch (uc->config.dir) {
636 case DMA_DEV_TO_MEM:
637 if (uc->rchan) {
638 ring1 = uc->rflow->fd_ring;
639 ring2 = uc->rflow->r_ring;
640 }
641 break;
642 case DMA_MEM_TO_DEV:
643 case DMA_MEM_TO_MEM:
644 if (uc->tchan) {
645 ring1 = uc->tchan->t_ring;
646 ring2 = uc->tchan->tc_ring;
647 }
648 break;
649 default:
650 break;
651 }
652
653 if (ring1)
654 k3_ringacc_ring_reset_dma(ring1,
655 k3_ringacc_ring_get_occ(ring1));
656 if (ring2)
657 k3_ringacc_ring_reset(ring2);
658
659 /* make sure we are not leaking memory due to a stalled descriptor */
660 if (uc->terminated_desc) {
661 udma_desc_free(&uc->terminated_desc->vd);
662 uc->terminated_desc = NULL;
663 }
664
665 uc->in_ring_cnt = 0;
666}
667
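/*
 * Clear the channel real-time counters by writing back the values that
 * were just read (the RT counters appear to decrement by the amount
 * written, so this zeroes them), and reset the software byte counter
 * as well.
 */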
668static void udma_reset_counters(struct udma_chan *uc)
669{
670 u32 val;
671
672 if (uc->tchan) {
673 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
674 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_BCNT_REG, val);
675
676 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG);
677 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG, val);
678
679 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PCNT_REG);
680 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PCNT_REG, val);
681
682 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
683 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG, val);
684 }
685
686 if (uc->rchan) {
687 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_BCNT_REG);
688 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_BCNT_REG, val);
689
690 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG);
691 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG, val);
692
693 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PCNT_REG);
694 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PCNT_REG, val);
695
696 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG);
697 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG, val);
698 }
699
700 uc->bcnt = 0;
701}
702
703static int udma_reset_chan(struct udma_chan *uc, bool hard)
704{
705 switch (uc->config.dir) {
706 case DMA_DEV_TO_MEM:
707 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, 0);
708 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
709 break;
710 case DMA_MEM_TO_DEV:
711 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
712 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, 0);
713 break;
714 case DMA_MEM_TO_MEM:
715 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
716 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
717 break;
718 default:
719 return -EINVAL;
720 }
721
722 /* Reset all counters */
723 udma_reset_counters(uc);
724
725 /* Hard reset: re-initialize the channel to reset */
726 if (hard) {
727 struct udma_chan_config ucc_backup;
728 int ret;
729
730 memcpy(&ucc_backup, &uc->config, sizeof(uc->config));
731 uc->ud->ddev.device_free_chan_resources(&uc->vc.chan);
732
733 /* restore the channel configuration */
734 memcpy(&uc->config, &ucc_backup, sizeof(uc->config));
735 ret = uc->ud->ddev.device_alloc_chan_resources(&uc->vc.chan);
736 if (ret)
737 return ret;
738
739 /*
740 * Setting forced teardown after forced reset helps recovering
741 * the rchan.
742 */
743 if (uc->config.dir == DMA_DEV_TO_MEM)
744 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
745 UDMA_CHAN_RT_CTL_EN |
746 UDMA_CHAN_RT_CTL_TDOWN |
747 UDMA_CHAN_RT_CTL_FTDOWN);
748 }
749 uc->state = UDMA_CHAN_IS_IDLE;
750
751 return 0;
752}
753
754static void udma_start_desc(struct udma_chan *uc)
755{
756 struct udma_chan_config *ucc = &uc->config;
757
758 if (ucc->pkt_mode && (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) {
759 int i;
760
761 /* Push all descriptors to ring for packet mode cyclic or RX */
762 for (i = 0; i < uc->desc->sglen; i++)
763 udma_push_to_ring(uc, i);
764 } else {
765 udma_push_to_ring(uc, 0);
766 }
767}
768
769static bool udma_chan_needs_reconfiguration(struct udma_chan *uc)
770{
771 /* Only PDMAs have staticTR */
772 if (uc->config.ep_type == PSIL_EP_NATIVE)
773 return false;
774
775 /* Check if the staticTR configuration has changed for TX */
776 if (memcmp(&uc->static_tr, &uc->desc->static_tr, sizeof(uc->static_tr)))
777 return true;
778
779 return false;
780}
781
782static int udma_start(struct udma_chan *uc)
783{
784 struct virt_dma_desc *vd = vchan_next_desc(&uc->vc);
785
786 if (!vd) {
787 uc->desc = NULL;
788 return -ENOENT;
789 }
790
791 list_del(&vd->node);
792
793 uc->desc = to_udma_desc(&vd->tx);
794
795 /* Channel is already running and does not need reconfiguration */
796 if (udma_is_chan_running(uc) && !udma_chan_needs_reconfiguration(uc)) {
797 udma_start_desc(uc);
798 goto out;
799 }
800
801 /* Make sure that we clear the teardown bit, if it is set */
802 udma_reset_chan(uc, false);
803
804 /* Push descriptors before we start the channel */
805 udma_start_desc(uc);
806
807 switch (uc->desc->dir) {
808 case DMA_DEV_TO_MEM:
809 /* Config remote TR */
810 if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
811 u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
812 PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
813 const struct udma_match_data *match_data =
814 uc->ud->match_data;
815
816 if (uc->config.enable_acc32)
817 val |= PDMA_STATIC_TR_XY_ACC32;
818 if (uc->config.enable_burst)
819 val |= PDMA_STATIC_TR_XY_BURST;
820
821 udma_rchanrt_write(uc->rchan,
822 UDMA_RCHAN_RT_PEER_STATIC_TR_XY_REG, val);
823
824 udma_rchanrt_write(uc->rchan,
825 UDMA_RCHAN_RT_PEER_STATIC_TR_Z_REG,
826 PDMA_STATIC_TR_Z(uc->desc->static_tr.bstcnt,
827 match_data->statictr_z_mask));
828
829 /* save the current staticTR configuration */
830 memcpy(&uc->static_tr, &uc->desc->static_tr,
831 sizeof(uc->static_tr));
832 }
833
834 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
835 UDMA_CHAN_RT_CTL_EN);
836
837 /* Enable remote */
838 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
839 UDMA_PEER_RT_EN_ENABLE);
840
841 break;
842 case DMA_MEM_TO_DEV:
843 /* Config remote TR */
844 if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
845 u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
846 PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
847
848 if (uc->config.enable_acc32)
849 val |= PDMA_STATIC_TR_XY_ACC32;
850 if (uc->config.enable_burst)
851 val |= PDMA_STATIC_TR_XY_BURST;
852
853 udma_tchanrt_write(uc->tchan,
854 UDMA_TCHAN_RT_PEER_STATIC_TR_XY_REG, val);
855
856 /* save the current staticTR configuration */
857 memcpy(&uc->static_tr, &uc->desc->static_tr,
858 sizeof(uc->static_tr));
859 }
860
861 /* Enable remote */
862 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
863 UDMA_PEER_RT_EN_ENABLE);
864
865 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
866 UDMA_CHAN_RT_CTL_EN);
867
868 break;
869 case DMA_MEM_TO_MEM:
870 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
871 UDMA_CHAN_RT_CTL_EN);
872 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
873 UDMA_CHAN_RT_CTL_EN);
874
875 break;
876 default:
877 return -EINVAL;
878 }
879
880 uc->state = UDMA_CHAN_IS_ACTIVE;
881out:
882
883 return 0;
884}
885
886static int udma_stop(struct udma_chan *uc)
887{
888 enum udma_chan_state old_state = uc->state;
889
890 uc->state = UDMA_CHAN_IS_TERMINATING;
891 reinit_completion(&uc->teardown_completed);
892
893 switch (uc->config.dir) {
894 case DMA_DEV_TO_MEM:
895 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
896 UDMA_PEER_RT_EN_ENABLE |
897 UDMA_PEER_RT_EN_TEARDOWN);
898 break;
899 case DMA_MEM_TO_DEV:
900 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
901 UDMA_PEER_RT_EN_ENABLE |
902 UDMA_PEER_RT_EN_FLUSH);
903 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
904 UDMA_CHAN_RT_CTL_EN |
905 UDMA_CHAN_RT_CTL_TDOWN);
906 break;
907 case DMA_MEM_TO_MEM:
908 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
909 UDMA_CHAN_RT_CTL_EN |
910 UDMA_CHAN_RT_CTL_TDOWN);
911 break;
912 default:
913 uc->state = old_state;
914 complete_all(&uc->teardown_completed);
915 return -EINVAL;
916 }
917
918 return 0;
919}
920
921static void udma_cyclic_packet_elapsed(struct udma_chan *uc)
922{
923 struct udma_desc *d = uc->desc;
924 struct cppi5_host_desc_t *h_desc;
925
926 h_desc = d->hwdesc[d->desc_idx].cppi5_desc_vaddr;
927 cppi5_hdesc_reset_to_original(h_desc);
928 udma_push_to_ring(uc, d->desc_idx);
929 d->desc_idx = (d->desc_idx + 1) % d->sglen;
930}
931
932static inline void udma_fetch_epib(struct udma_chan *uc, struct udma_desc *d)
933{
934 struct cppi5_host_desc_t *h_desc = d->hwdesc[0].cppi5_desc_vaddr;
935
936 memcpy(d->metadata, h_desc->epib, d->metadata_size);
937}
938
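/*
 * For TX towards a PDMA peer a ring completion only means that UDMA has
 * pushed the data into PSI-L; the peripheral may not have consumed it yet.
 * Compare the peer byte counter with the channel byte counter and, if the
 * peer is behind, record the outstanding residue and a timestamp so the
 * tx_drain worker can poll for the real completion.
 */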
939static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d)
940{
941 u32 peer_bcnt, bcnt;
942
943 /* Only TX towards PDMA is affected */
944 if (uc->config.ep_type == PSIL_EP_NATIVE ||
945 uc->config.dir != DMA_MEM_TO_DEV)
946 return true;
947
948 peer_bcnt = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
949 bcnt = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
950
951 if (peer_bcnt < bcnt) {
952 uc->tx_drain.residue = bcnt - peer_bcnt;
953 uc->tx_drain.jiffie = jiffies;
954 return false;
955 }
956
957 return true;
958}
959
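/*
 * Delayed work used to wait for the PDMA peer to drain a TX transfer.
 * The next polling delay is estimated from how fast the residue shrank
 * since the previous check; if no progress was made the check is retried
 * after one second, and once the descriptor is really done it is
 * completed and the next descriptor is started.
 */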
960static void udma_check_tx_completion(struct work_struct *work)
961{
962 struct udma_chan *uc = container_of(work, typeof(*uc),
963 tx_drain.work.work);
964 bool desc_done = true;
965 u32 residue_diff;
966 unsigned long jiffie_diff, delay;
967
968 if (uc->desc) {
969 residue_diff = uc->tx_drain.residue;
970 jiffie_diff = uc->tx_drain.jiffie;
971 desc_done = udma_is_desc_really_done(uc, uc->desc);
972 }
973
974 if (!desc_done) {
975 jiffie_diff = uc->tx_drain.jiffie - jiffie_diff;
976 residue_diff -= uc->tx_drain.residue;
977 if (residue_diff) {
978 /* Try to guess when we should check next time */
979 residue_diff /= jiffie_diff;
980 delay = uc->tx_drain.residue / residue_diff / 3;
981 if (jiffies_to_msecs(delay) < 5)
982 delay = 0;
983 } else {
984 /* No progress, check again in 1 second */
985 delay = HZ;
986 }
987
988 schedule_delayed_work(&uc->tx_drain.work, delay);
989 } else if (uc->desc) {
990 struct udma_desc *d = uc->desc;
991
992 uc->bcnt += d->residue;
993 udma_start(uc);
994 vchan_cookie_complete(&d->vd);
995 }
996}
997
998static irqreturn_t udma_ring_irq_handler(int irq, void *data)
999{
1000 struct udma_chan *uc = data;
1001 struct udma_desc *d;
1002 unsigned long flags;
1003 dma_addr_t paddr = 0;
1004
1005 if (udma_pop_from_ring(uc, &paddr) || !paddr)
1006 return IRQ_HANDLED;
1007
1008 spin_lock_irqsave(&uc->vc.lock, flags);
1009
1010 /* Teardown completion message */
1011 if (cppi5_desc_is_tdcm(paddr)) {
1012 /* Compensate our internal pop/push counter */
1013 uc->in_ring_cnt++;
1014
1015 complete_all(&uc->teardown_completed);
1016
1017 if (uc->terminated_desc) {
1018 udma_desc_free(&uc->terminated_desc->vd);
1019 uc->terminated_desc = NULL;
1020 }
1021
1022 if (!uc->desc)
1023 udma_start(uc);
1024
1025 goto out;
1026 }
1027
1028 d = udma_udma_desc_from_paddr(uc, paddr);
1029
1030 if (d) {
1031 dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
1032 d->desc_idx);
1033 if (desc_paddr != paddr) {
1034 dev_err(uc->ud->dev, "not matching descriptors!\n");
1035 goto out;
1036 }
1037
1038 if (uc->cyclic) {
1039 /* push the descriptor back to the ring */
1040 if (d == uc->desc) {
1041 udma_cyclic_packet_elapsed(uc);
1042 vchan_cyclic_callback(&d->vd);
1043 }
1044 } else {
1045 bool desc_done = false;
1046
1047 if (d == uc->desc) {
1048 desc_done = udma_is_desc_really_done(uc, d);
1049
1050 if (desc_done) {
1051 uc->bcnt += d->residue;
1052 udma_start(uc);
1053 } else {
1054 schedule_delayed_work(&uc->tx_drain.work,
1055 0);
1056 }
1057 }
1058
1059 if (desc_done)
1060 vchan_cookie_complete(&d->vd);
1061 }
1062 }
1063out:
1064 spin_unlock_irqrestore(&uc->vc.lock, flags);
1065
1066 return IRQ_HANDLED;
1067}
1068
1069static irqreturn_t udma_udma_irq_handler(int irq, void *data)
1070{
1071 struct udma_chan *uc = data;
1072 struct udma_desc *d;
1073 unsigned long flags;
1074
1075 spin_lock_irqsave(&uc->vc.lock, flags);
1076 d = uc->desc;
1077 if (d) {
1078 d->tr_idx = (d->tr_idx + 1) % d->sglen;
1079
1080 if (uc->cyclic) {
1081 vchan_cyclic_callback(&d->vd);
1082 } else {
1083 /* TODO: figure out the real amount of data */
1084 uc->bcnt += d->residue;
1085 udma_start(uc);
1086 vchan_cookie_complete(&d->vd);
1087 }
1088 }
1089
1090 spin_unlock_irqrestore(&uc->vc.lock, flags);
1091
1092 return IRQ_HANDLED;
1093}
1094
1095/**
1096 * __udma_alloc_gp_rflow_range - alloc range of GP RX flows
1097 * @ud: UDMA device
1098 * @from: Start the search from this flow id number
1099 * @cnt: Number of consecutive flow ids to allocate
1100 *
1101 * Allocate a range of RX flow ids for future use; those flows can be requested
1102 * only by explicit flow id number. If @from is set to -1 it will try to find the
1103 * first free range. If @from is a positive value it will force allocation only
1104 * of the specified range of flows.
1105 *
1106 * Returns -ENOMEM if a free range can't be found.
1107 * -EEXIST if requested range is busy.
1108 * -EINVAL if wrong input values passed.
1109 * Returns flow id on success.
1110 */
1111static int __udma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
1112{
1113 int start, tmp_from;
1114 DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);
1115
1116 tmp_from = from;
1117 if (tmp_from < 0)
1118 tmp_from = ud->rchan_cnt;
1119 /* default flows can't be allocated; they are accessible only by id */
1120 if (tmp_from < ud->rchan_cnt)
1121 return -EINVAL;
1122
1123 if (tmp_from + cnt > ud->rflow_cnt)
1124 return -EINVAL;
1125
1126 bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated,
1127 ud->rflow_cnt);
1128
1129 start = bitmap_find_next_zero_area(tmp,
1130 ud->rflow_cnt,
1131 tmp_from, cnt, 0);
1132 if (start >= ud->rflow_cnt)
1133 return -ENOMEM;
1134
1135 if (from >= 0 && start != from)
1136 return -EEXIST;
1137
1138 bitmap_set(ud->rflow_gp_map_allocated, start, cnt);
1139 return start;
1140}
1141
1142static int __udma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
1143{
1144 if (from < ud->rchan_cnt)
1145 return -EINVAL;
1146 if (from + cnt > ud->rflow_cnt)
1147 return -EINVAL;
1148
1149 bitmap_clear(ud->rflow_gp_map_allocated, from, cnt);
1150 return 0;
1151}
1152
1153static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id)
1154{
1155 /*
1156 * A request for an rflow by ID can be made for any rflow that is not
1157 * in use, on the assumption that the caller knows what it is doing.
1158 * TI-SCI FW will perform an additional permission check anyway, so
1159 * this is safe.
1160 */
1161
1162 if (id < 0 || id >= ud->rflow_cnt)
1163 return ERR_PTR(-ENOENT);
1164
1165 if (test_bit(id, ud->rflow_in_use))
1166 return ERR_PTR(-ENOENT);
1167
1168 /* GP rflow has to be allocated first */
1169 if (!test_bit(id, ud->rflow_gp_map) &&
1170 !test_bit(id, ud->rflow_gp_map_allocated))
1171 return ERR_PTR(-EINVAL);
1172
1173 dev_dbg(ud->dev, "get rflow%d\n", id);
1174 set_bit(id, ud->rflow_in_use);
1175 return &ud->rflows[id];
1176}
1177
1178static void __udma_put_rflow(struct udma_dev *ud, struct udma_rflow *rflow)
1179{
1180 if (!test_bit(rflow->id, ud->rflow_in_use)) {
1181 dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id);
1182 return;
1183 }
1184
1185 dev_dbg(ud->dev, "put rflow%d\n", rflow->id);
1186 clear_bit(rflow->id, ud->rflow_in_use);
1187}
1188
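/*
 * UDMA_RESERVE_RESOURCE(res) generates __udma_reserve_tchan() and
 * __udma_reserve_rchan(). With a non-negative id the given channel is
 * reserved if it is free; with id == -1 the first free channel at or
 * above the start index of the requested throughput level is picked.
 */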
1189#define UDMA_RESERVE_RESOURCE(res) \
1190static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud, \
1191 enum udma_tp_level tpl, \
1192 int id) \
1193{ \
1194 if (id >= 0) { \
1195 if (test_bit(id, ud->res##_map)) { \
1196 dev_err(ud->dev, "%s%d is in use\n", #res, id); \
1197 return ERR_PTR(-ENOENT); \
1198 } \
1199 } else { \
1200 int start; \
1201 \
1202 if (tpl >= ud->match_data->tpl_levels) \
1203 tpl = ud->match_data->tpl_levels - 1; \
1204 \
1205 start = ud->match_data->level_start_idx[tpl]; \
1206 \
1207 id = find_next_zero_bit(ud->res##_map, ud->res##_cnt, \
1208 start); \
1209 if (id == ud->res##_cnt) { \
1210 return ERR_PTR(-ENOENT); \
1211 } \
1212 } \
1213 \
1214 set_bit(id, ud->res##_map); \
1215 return &ud->res##s[id]; \
1216}
1217
1218UDMA_RESERVE_RESOURCE(tchan);
1219UDMA_RESERVE_RESOURCE(rchan);
1220
1221static int udma_get_tchan(struct udma_chan *uc)
1222{
1223 struct udma_dev *ud = uc->ud;
1224
1225 if (uc->tchan) {
1226 dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
1227 uc->id, uc->tchan->id);
1228 return 0;
1229 }
1230
1231 uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl, -1);
1232 if (IS_ERR(uc->tchan))
1233 return PTR_ERR(uc->tchan);
1234
1235 return 0;
1236}
1237
1238static int udma_get_rchan(struct udma_chan *uc)
1239{
1240 struct udma_dev *ud = uc->ud;
1241
1242 if (uc->rchan) {
1243 dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
1244 uc->id, uc->rchan->id);
1245 return 0;
1246 }
1247
1248 uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl, -1);
1249 if (IS_ERR(uc->rchan))
1250 return PTR_ERR(uc->rchan);
1251
1252 return 0;
1253}
1254
1255static int udma_get_chan_pair(struct udma_chan *uc)
1256{
1257 struct udma_dev *ud = uc->ud;
1258 const struct udma_match_data *match_data = ud->match_data;
1259 int chan_id, end;
1260
1261 if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
1262 dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
1263 uc->id, uc->tchan->id);
1264 return 0;
1265 }
1266
1267 if (uc->tchan) {
1268 dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
1269 uc->id, uc->tchan->id);
1270 return -EBUSY;
1271 } else if (uc->rchan) {
1272 dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
1273 uc->id, uc->rchan->id);
1274 return -EBUSY;
1275 }
1276
1277 /* Can be optimized, but let's have it like this for now */
1278 end = min(ud->tchan_cnt, ud->rchan_cnt);
1279 /* Try to use the highest TPL channel pair for MEM_TO_MEM channels */
1280 chan_id = match_data->level_start_idx[match_data->tpl_levels - 1];
1281 for (; chan_id < end; chan_id++) {
1282 if (!test_bit(chan_id, ud->tchan_map) &&
1283 !test_bit(chan_id, ud->rchan_map))
1284 break;
1285 }
1286
1287 if (chan_id == end)
1288 return -ENOENT;
1289
1290 set_bit(chan_id, ud->tchan_map);
1291 set_bit(chan_id, ud->rchan_map);
1292 uc->tchan = &ud->tchans[chan_id];
1293 uc->rchan = &ud->rchans[chan_id];
1294
1295 return 0;
1296}
1297
1298static int udma_get_rflow(struct udma_chan *uc, int flow_id)
1299{
1300 struct udma_dev *ud = uc->ud;
1301
1302 if (!uc->rchan) {
1303 dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id);
1304 return -EINVAL;
1305 }
1306
1307 if (uc->rflow) {
1308 dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
1309 uc->id, uc->rflow->id);
1310 return 0;
1311 }
1312
1313 uc->rflow = __udma_get_rflow(ud, flow_id);
1314 if (IS_ERR(uc->rflow))
1315 return PTR_ERR(uc->rflow);
1316
1317 return 0;
1318}
1319
1320static void udma_put_rchan(struct udma_chan *uc)
1321{
1322 struct udma_dev *ud = uc->ud;
1323
1324 if (uc->rchan) {
1325 dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
1326 uc->rchan->id);
1327 clear_bit(uc->rchan->id, ud->rchan_map);
1328 uc->rchan = NULL;
1329 }
1330}
1331
1332static void udma_put_tchan(struct udma_chan *uc)
1333{
1334 struct udma_dev *ud = uc->ud;
1335
1336 if (uc->tchan) {
1337 dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
1338 uc->tchan->id);
1339 clear_bit(uc->tchan->id, ud->tchan_map);
1340 uc->tchan = NULL;
1341 }
1342}
1343
1344static void udma_put_rflow(struct udma_chan *uc)
1345{
1346 struct udma_dev *ud = uc->ud;
1347
1348 if (uc->rflow) {
1349 dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
1350 uc->rflow->id);
1351 __udma_put_rflow(ud, uc->rflow);
1352 uc->rflow = NULL;
1353 }
1354}
1355
1356static void udma_free_tx_resources(struct udma_chan *uc)
1357{
1358 if (!uc->tchan)
1359 return;
1360
1361 k3_ringacc_ring_free(uc->tchan->t_ring);
1362 k3_ringacc_ring_free(uc->tchan->tc_ring);
1363 uc->tchan->t_ring = NULL;
1364 uc->tchan->tc_ring = NULL;
1365
1366 udma_put_tchan(uc);
1367}
1368
1369static int udma_alloc_tx_resources(struct udma_chan *uc)
1370{
1371 struct k3_ring_cfg ring_cfg;
1372 struct udma_dev *ud = uc->ud;
1373 int ret;
1374
1375 ret = udma_get_tchan(uc);
1376 if (ret)
1377 return ret;
1378
1379 uc->tchan->t_ring = k3_ringacc_request_ring(ud->ringacc,
1380 uc->tchan->id, 0);
1381 if (!uc->tchan->t_ring) {
1382 ret = -EBUSY;
1383 goto err_tx_ring;
1384 }
1385
1386 uc->tchan->tc_ring = k3_ringacc_request_ring(ud->ringacc, -1, 0);
1387 if (!uc->tchan->tc_ring) {
1388 ret = -EBUSY;
1389 goto err_txc_ring;
1390 }
1391
1392 memset(&ring_cfg, 0, sizeof(ring_cfg));
1393 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1394 ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
1395 ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
1396
1397 ret = k3_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg);
1398 ret |= k3_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg);
1399
1400 if (ret)
1401 goto err_ringcfg;
1402
1403 return 0;
1404
1405err_ringcfg:
1406 k3_ringacc_ring_free(uc->tchan->tc_ring);
1407 uc->tchan->tc_ring = NULL;
1408err_txc_ring:
1409 k3_ringacc_ring_free(uc->tchan->t_ring);
1410 uc->tchan->t_ring = NULL;
1411err_tx_ring:
1412 udma_put_tchan(uc);
1413
1414 return ret;
1415}
1416
1417static void udma_free_rx_resources(struct udma_chan *uc)
1418{
1419 if (!uc->rchan)
1420 return;
1421
1422 if (uc->rflow) {
1423 struct udma_rflow *rflow = uc->rflow;
1424
1425 k3_ringacc_ring_free(rflow->fd_ring);
1426 k3_ringacc_ring_free(rflow->r_ring);
1427 rflow->fd_ring = NULL;
1428 rflow->r_ring = NULL;
1429
1430 udma_put_rflow(uc);
1431 }
1432
1433 udma_put_rchan(uc);
1434}
1435
1436static int udma_alloc_rx_resources(struct udma_chan *uc)
1437{
1438 struct udma_dev *ud = uc->ud;
1439 struct k3_ring_cfg ring_cfg;
1440 struct udma_rflow *rflow;
1441 int fd_ring_id;
1442 int ret;
1443
1444 ret = udma_get_rchan(uc);
1445 if (ret)
1446 return ret;
1447
1448 /* For MEM_TO_MEM we don't need rflow or rings */
1449 if (uc->config.dir == DMA_MEM_TO_MEM)
1450 return 0;
1451
1452 ret = udma_get_rflow(uc, uc->rchan->id);
1453 if (ret) {
1454 ret = -EBUSY;
1455 goto err_rflow;
1456 }
1457
1458 rflow = uc->rflow;
1459 fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id;
1460 rflow->fd_ring = k3_ringacc_request_ring(ud->ringacc, fd_ring_id, 0);
1461 if (!rflow->fd_ring) {
1462 ret = -EBUSY;
1463 goto err_rx_ring;
1464 }
1465
1466 rflow->r_ring = k3_ringacc_request_ring(ud->ringacc, -1, 0);
1467 if (!rflow->r_ring) {
1468 ret = -EBUSY;
1469 goto err_rxc_ring;
1470 }
1471
1472 memset(&ring_cfg, 0, sizeof(ring_cfg));
1473
1474 if (uc->config.pkt_mode)
1475 ring_cfg.size = SG_MAX_SEGMENTS;
1476 else
1477 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1478
1479 ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
1480 ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
1481
1482 ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);
1483 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1484 ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);
1485
1486 if (ret)
1487 goto err_ringcfg;
1488
1489 return 0;
1490
1491err_ringcfg:
1492 k3_ringacc_ring_free(rflow->r_ring);
1493 rflow->r_ring = NULL;
1494err_rxc_ring:
1495 k3_ringacc_ring_free(rflow->fd_ring);
1496 rflow->fd_ring = NULL;
1497err_rx_ring:
1498 udma_put_rflow(uc);
1499err_rflow:
1500 udma_put_rchan(uc);
1501
1502 return ret;
1503}
1504
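/*
 * valid_params masks for the TI-SCI channel configuration requests below;
 * they tell the firmware which fields of the tx/rx channel config messages
 * carry meaningful values.
 */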
1505#define TISCI_TCHAN_VALID_PARAMS ( \
1506 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1507 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID | \
1508 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID | \
1509 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \
1510 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID | \
1511 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
1512 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
1513 TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1514
1515#define TISCI_RCHAN_VALID_PARAMS ( \
1516 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1517 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
1518 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
1519 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \
1520 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID | \
1521 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID | \
1522 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID | \
1523 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID | \
1524 TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1525
1526static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
1527{
1528 struct udma_dev *ud = uc->ud;
1529 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1530 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1531 struct udma_tchan *tchan = uc->tchan;
1532 struct udma_rchan *rchan = uc->rchan;
1533 int ret = 0;
1534
1535 /* Non synchronized - mem to mem type of transfer */
1536 int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
1537 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1538 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
1539
1540 req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS;
1541 req_tx.nav_id = tisci_rm->tisci_dev_id;
1542 req_tx.index = tchan->id;
1543 req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
1544 req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
1545 req_tx.txcq_qnum = tc_ring;
1546 req_tx.tx_atype = ud->atype;
1547
1548 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1549 if (ret) {
1550 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
1551 return ret;
1552 }
1553
1554 req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS;
1555 req_rx.nav_id = tisci_rm->tisci_dev_id;
1556 req_rx.index = rchan->id;
1557 req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
1558 req_rx.rxcq_qnum = tc_ring;
1559 req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
1560 req_rx.rx_atype = ud->atype;
1561
1562 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
1563 if (ret)
1564 dev_err(ud->dev, "rchan%d alloc failed %d\n", rchan->id, ret);
1565
1566 return ret;
1567}
1568
1569static int udma_tisci_tx_channel_config(struct udma_chan *uc)
1570{
1571 struct udma_dev *ud = uc->ud;
1572 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1573 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1574 struct udma_tchan *tchan = uc->tchan;
1575 int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
1576 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1577 u32 mode, fetch_size;
1578 int ret = 0;
1579
1580 if (uc->config.pkt_mode) {
1581 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
1582 fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
1583 uc->config.psd_size, 0);
1584 } else {
1585 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
1586 fetch_size = sizeof(struct cppi5_desc_hdr_t);
1587 }
1588
1589 req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS;
1590 req_tx.nav_id = tisci_rm->tisci_dev_id;
1591 req_tx.index = tchan->id;
1592 req_tx.tx_chan_type = mode;
1593 req_tx.tx_supr_tdpkt = uc->config.notdpkt;
1594 req_tx.tx_fetch_size = fetch_size >> 2;
1595 req_tx.txcq_qnum = tc_ring;
1596 req_tx.tx_atype = uc->config.atype;
1597
1598 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1599 if (ret)
1600 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
1601
1602 return ret;
1603}
1604
1605static int udma_tisci_rx_channel_config(struct udma_chan *uc)
1606{
1607 struct udma_dev *ud = uc->ud;
1608 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1609 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1610 struct udma_rchan *rchan = uc->rchan;
1611 int fd_ring = k3_ringacc_get_ring_id(uc->rflow->fd_ring);
1612 int rx_ring = k3_ringacc_get_ring_id(uc->rflow->r_ring);
1613 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
1614 struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
1615 u32 mode, fetch_size;
1616 int ret = 0;
1617
1618 if (uc->config.pkt_mode) {
1619 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
1620 fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
1621 uc->config.psd_size, 0);
1622 } else {
1623 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
1624 fetch_size = sizeof(struct cppi5_desc_hdr_t);
1625 }
1626
1627 req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS;
1628 req_rx.nav_id = tisci_rm->tisci_dev_id;
1629 req_rx.index = rchan->id;
1630 req_rx.rx_fetch_size = fetch_size >> 2;
1631 req_rx.rxcq_qnum = rx_ring;
1632 req_rx.rx_chan_type = mode;
1633 req_rx.rx_atype = uc->config.atype;
1634
1635 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
1636 if (ret) {
1637 dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
1638 return ret;
1639 }
1640
1641 flow_req.valid_params =
1642 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
1643 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
1644 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
1645 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
1646 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
1647 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
1648 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
1649 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
1650 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
1651 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
1652 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
1653 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
1654 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
1655
1656 flow_req.nav_id = tisci_rm->tisci_dev_id;
1657 flow_req.flow_index = rchan->id;
1658
1659 if (uc->config.needs_epib)
1660 flow_req.rx_einfo_present = 1;
1661 else
1662 flow_req.rx_einfo_present = 0;
1663 if (uc->config.psd_size)
1664 flow_req.rx_psinfo_present = 1;
1665 else
1666 flow_req.rx_psinfo_present = 0;
1667 flow_req.rx_error_handling = 1;
1668 flow_req.rx_dest_qnum = rx_ring;
1669 flow_req.rx_src_tag_hi_sel = UDMA_RFLOW_SRCTAG_NONE;
1670 flow_req.rx_src_tag_lo_sel = UDMA_RFLOW_SRCTAG_SRC_TAG;
1671 flow_req.rx_dest_tag_hi_sel = UDMA_RFLOW_DSTTAG_DST_TAG_HI;
1672 flow_req.rx_dest_tag_lo_sel = UDMA_RFLOW_DSTTAG_DST_TAG_LO;
1673 flow_req.rx_fdq0_sz0_qnum = fd_ring;
1674 flow_req.rx_fdq1_qnum = fd_ring;
1675 flow_req.rx_fdq2_qnum = fd_ring;
1676 flow_req.rx_fdq3_qnum = fd_ring;
1677
1678 ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
1679
1680 if (ret)
1681 dev_err(ud->dev, "flow%d config failed: %d\n", rchan->id, ret);
1682
1683 return 0;
1684}
1685
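/*
 * Allocate everything a channel needs based on its configured direction:
 * the tchan/rchan (and rflow) with their rings, the TI-SCI channel/flow
 * configuration, PSI-L thread pairing, and the ring interrupt (plus, for
 * slave TR mode channels, the UDMA TR event interrupt).
 */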
1686static int udma_alloc_chan_resources(struct dma_chan *chan)
1687{
1688 struct udma_chan *uc = to_udma_chan(chan);
1689 struct udma_dev *ud = to_udma_dev(chan->device);
1690 const struct udma_match_data *match_data = ud->match_data;
1691 struct k3_ring *irq_ring;
1692 u32 irq_udma_idx;
1693 int ret;
1694
1695 if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) {
1696 uc->use_dma_pool = true;
1697 /* in case of MEM_TO_MEM we have maximum of two TRs */
1698 if (uc->config.dir == DMA_MEM_TO_MEM) {
1699 uc->config.hdesc_size = cppi5_trdesc_calc_size(
1700 sizeof(struct cppi5_tr_type15_t), 2);
1701 uc->config.pkt_mode = false;
1702 }
1703 }
1704
1705 if (uc->use_dma_pool) {
1706 uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
1707 uc->config.hdesc_size,
1708 ud->desc_align,
1709 0);
1710 if (!uc->hdesc_pool) {
1711 dev_err(ud->ddev.dev,
1712 "Descriptor pool allocation failed\n");
1713 uc->use_dma_pool = false;
1714 return -ENOMEM;
1715 }
1716 }
1717
1718 /*
1719 * Make sure that the completion is in a known state:
1720 * No teardown, the channel is idle
1721 */
1722 reinit_completion(&uc->teardown_completed);
1723 complete_all(&uc->teardown_completed);
1724 uc->state = UDMA_CHAN_IS_IDLE;
1725
1726 switch (uc->config.dir) {
1727 case DMA_MEM_TO_MEM:
1728 /* Non synchronized - mem to mem type of transfer */
1729 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
1730 uc->id);
1731
1732 ret = udma_get_chan_pair(uc);
1733 if (ret)
1734 return ret;
1735
1736 ret = udma_alloc_tx_resources(uc);
1737 if (ret)
1738 return ret;
1739
1740 ret = udma_alloc_rx_resources(uc);
1741 if (ret) {
1742 udma_free_tx_resources(uc);
1743 return ret;
1744 }
1745
1746 uc->config.src_thread = ud->psil_base + uc->tchan->id;
1747 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
1748 K3_PSIL_DST_THREAD_ID_OFFSET;
1749
1750 irq_ring = uc->tchan->tc_ring;
1751 irq_udma_idx = uc->tchan->id;
1752
1753 ret = udma_tisci_m2m_channel_config(uc);
1754 break;
1755 case DMA_MEM_TO_DEV:
1756 /* Slave transfer synchronized - mem to dev (TX) transfer */
1757 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
1758 uc->id);
1759
1760 ret = udma_alloc_tx_resources(uc);
1761 if (ret) {
1762 uc->config.remote_thread_id = -1;
1763 return ret;
1764 }
1765
1766 uc->config.src_thread = ud->psil_base + uc->tchan->id;
1767 uc->config.dst_thread = uc->config.remote_thread_id;
1768 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
1769
1770 irq_ring = uc->tchan->tc_ring;
1771 irq_udma_idx = uc->tchan->id;
1772
1773 ret = udma_tisci_tx_channel_config(uc);
1774 break;
1775 case DMA_DEV_TO_MEM:
1776 /* Slave transfer synchronized - dev to mem (RX) transfer */
1777 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
1778 uc->id);
1779
1780 ret = udma_alloc_rx_resources(uc);
1781 if (ret) {
1782 uc->config.remote_thread_id = -1;
1783 return ret;
1784 }
1785
1786 uc->config.src_thread = uc->config.remote_thread_id;
1787 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
1788 K3_PSIL_DST_THREAD_ID_OFFSET;
1789
1790 irq_ring = uc->rflow->r_ring;
1791 irq_udma_idx = match_data->rchan_oes_offset + uc->rchan->id;
1792
1793 ret = udma_tisci_rx_channel_config(uc);
1794 break;
1795 default:
1796 /* Can not happen */
1797 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
1798 __func__, uc->id, uc->config.dir);
1799 return -EINVAL;
1800 }
1801
1802 /* check if the channel configuration was successful */
1803 if (ret)
1804 goto err_res_free;
1805
1806 if (udma_is_chan_running(uc)) {
1807 dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
1808 udma_stop(uc);
1809 if (udma_is_chan_running(uc)) {
1810 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
1811 goto err_res_free;
1812 }
1813 }
1814
1815 /* PSI-L pairing */
1816 ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
1817 if (ret) {
1818 dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
1819 uc->config.src_thread, uc->config.dst_thread);
1820 goto err_res_free;
1821 }
1822
1823 uc->psil_paired = true;
1824
1825 uc->irq_num_ring = k3_ringacc_get_ring_irq_num(irq_ring);
1826 if (uc->irq_num_ring <= 0) {
1827 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
1828 k3_ringacc_get_ring_id(irq_ring));
1829 ret = -EINVAL;
1830 goto err_psi_free;
1831 }
1832
1833 ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
1834 IRQF_TRIGGER_HIGH, uc->name, uc);
1835 if (ret) {
1836 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
1837 goto err_irq_free;
1838 }
1839
1840 /* Event from UDMA (TR events) only needed for slave TR mode channels */
1841 if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) {
1842 uc->irq_num_udma = ti_sci_inta_msi_get_virq(ud->dev,
1843 irq_udma_idx);
1844 if (uc->irq_num_udma <= 0) {
1845 dev_err(ud->dev, "Failed to get udma irq (index: %u)\n",
1846 irq_udma_idx);
1847 free_irq(uc->irq_num_ring, uc);
1848 ret = -EINVAL;
1849 goto err_irq_free;
1850 }
1851
1852 ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
1853 uc->name, uc);
1854 if (ret) {
1855 dev_err(ud->dev, "chan%d: UDMA irq request failed\n",
1856 uc->id);
1857 free_irq(uc->irq_num_ring, uc);
1858 goto err_irq_free;
1859 }
1860 } else {
1861 uc->irq_num_udma = 0;
1862 }
1863
1864 udma_reset_rings(uc);
1865
1866 INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
1867 udma_check_tx_completion);
1868 return 0;
1869
1870err_irq_free:
1871 uc->irq_num_ring = 0;
1872 uc->irq_num_udma = 0;
1873err_psi_free:
1874 navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
1875 uc->psil_paired = false;
1876err_res_free:
1877 udma_free_tx_resources(uc);
1878 udma_free_rx_resources(uc);
1879
1880 udma_reset_uchan(uc);
1881
1882 if (uc->use_dma_pool) {
1883 dma_pool_destroy(uc->hdesc_pool);
1884 uc->use_dma_pool = false;
1885 }
1886
1887 return ret;
1888}
1889
1890static int udma_slave_config(struct dma_chan *chan,
1891 struct dma_slave_config *cfg)
1892{
1893 struct udma_chan *uc = to_udma_chan(chan);
1894
1895 memcpy(&uc->cfg, cfg, sizeof(uc->cfg));
1896
1897 return 0;
1898}
1899
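/*
 * Allocate a single TR descriptor able to hold tr_count TR records. The
 * CPPI5 descriptor header occupies the first tr_size bytes, followed by
 * the TR request records and then the TR response entries; tr_req_base
 * and tr_resp_base point into this buffer. The return ring is selected
 * based on the transfer direction.
 */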
1900static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc,
1901 size_t tr_size, int tr_count,
1902 enum dma_transfer_direction dir)
1903{
1904 struct udma_hwdesc *hwdesc;
1905 struct cppi5_desc_hdr_t *tr_desc;
1906 struct udma_desc *d;
1907 u32 reload_count = 0;
1908 u32 ring_id;
1909
1910 switch (tr_size) {
1911 case 16:
1912 case 32:
1913 case 64:
1914 case 128:
1915 break;
1916 default:
1917 dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size);
1918 return NULL;
1919 }
1920
1921 /* We have only one descriptor containing multiple TRs */
1922 d = kzalloc(sizeof(*d) + sizeof(d->hwdesc[0]), GFP_NOWAIT);
1923 if (!d)
1924 return NULL;
1925
1926 d->sglen = tr_count;
1927
1928 d->hwdesc_count = 1;
1929 hwdesc = &d->hwdesc[0];
1930
1931 /* Allocate memory for DMA ring descriptor */
1932 if (uc->use_dma_pool) {
1933 hwdesc->cppi5_desc_size = uc->config.hdesc_size;
1934 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
1935 GFP_NOWAIT,
1936 &hwdesc->cppi5_desc_paddr);
1937 } else {
1938 hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size,
1939 tr_count);
1940 hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
1941 uc->ud->desc_align);
1942 hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev,
1943 hwdesc->cppi5_desc_size,
1944 &hwdesc->cppi5_desc_paddr,
1945 GFP_NOWAIT);
1946 }
1947
1948 if (!hwdesc->cppi5_desc_vaddr) {
1949 kfree(d);
1950 return NULL;
1951 }
1952
1953 /* Start of the TR req records */
1954 hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
1955 /* Start address of the TR response array */
1956 hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size * tr_count;
1957
1958 tr_desc = hwdesc->cppi5_desc_vaddr;
1959
1960 if (uc->cyclic)
1961 reload_count = CPPI5_INFO0_TRDESC_RLDCNT_INFINITE;
1962
1963 if (dir == DMA_DEV_TO_MEM)
1964 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
1965 else
1966 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
1967
1968 cppi5_trdesc_init(tr_desc, tr_count, tr_size, 0, reload_count);
1969 cppi5_desc_set_pktids(tr_desc, uc->id,
1970 CPPI5_INFO1_DESC_FLOWID_DEFAULT);
1971 cppi5_desc_set_retpolicy(tr_desc, 0, ring_id);
1972
1973 return d;
1974}
1975
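/*
 * Build a TR mode slave SG descriptor: one Type-1 TR per SG entry, with
 * icnt0 set to one burst worth of data (burst * bus width), icnt1 to the
 * number of bursts in the entry, and EOP flagged on the last TR.
 */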
1976static struct udma_desc *
1977udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
1978 unsigned int sglen, enum dma_transfer_direction dir,
1979 unsigned long tx_flags, void *context)
1980{
1981 enum dma_slave_buswidth dev_width;
1982 struct scatterlist *sgent;
1983 struct udma_desc *d;
1984 size_t tr_size;
1985 struct cppi5_tr_type1_t *tr_req = NULL;
1986 unsigned int i;
1987 u32 burst;
1988
1989 if (dir == DMA_DEV_TO_MEM) {
1990 dev_width = uc->cfg.src_addr_width;
1991 burst = uc->cfg.src_maxburst;
1992 } else if (dir == DMA_MEM_TO_DEV) {
1993 dev_width = uc->cfg.dst_addr_width;
1994 burst = uc->cfg.dst_maxburst;
1995 } else {
1996 dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
1997 return NULL;
1998 }
1999
2000 if (!burst)
2001 burst = 1;
2002
2003 /* Now allocate and setup the descriptor. */
2004 tr_size = sizeof(struct cppi5_tr_type1_t);
2005 d = udma_alloc_tr_desc(uc, tr_size, sglen, dir);
2006 if (!d)
2007 return NULL;
2008
2009 d->sglen = sglen;
2010
2011 tr_req = d->hwdesc[0].tr_req_base;
2012 for_each_sg(sgl, sgent, sglen, i) {
2013 d->residue += sg_dma_len(sgent);
2014
2015 cppi5_tr_init(&tr_req[i].flags, CPPI5_TR_TYPE1, false, false,
2016 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2017 cppi5_tr_csf_set(&tr_req[i].flags, CPPI5_TR_CSF_SUPR_EVT);
2018
2019 tr_req[i].addr = sg_dma_address(sgent);
2020 tr_req[i].icnt0 = burst * dev_width;
2021 tr_req[i].dim1 = burst * dev_width;
2022 tr_req[i].icnt1 = sg_dma_len(sgent) / tr_req[i].icnt0;
2023 }
2024
2025 cppi5_tr_csf_set(&tr_req[i - 1].flags, CPPI5_TR_CSF_EOP);
2026
2027 return d;
2028}
2029
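/*
 * Derive the PDMA static TR parameters from the slave configuration: the
 * bus width maps to the element size encoding (0..4 for 1, 2, 3, 4 and 8
 * byte elements), elcnt comes from the caller, and for packet mode or
 * non-cyclic TR mode bstcnt (the Z count) is the number of elements after
 * which PDMA should close the packet, bounded by statictr_z_mask for RX.
 */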
2030static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
2031 enum dma_slave_buswidth dev_width,
2032 u16 elcnt)
2033{
2034 if (uc->config.ep_type != PSIL_EP_PDMA_XY)
2035 return 0;
2036
2037 /* Bus width translates to the element size (ES) */
2038 switch (dev_width) {
2039 case DMA_SLAVE_BUSWIDTH_1_BYTE:
2040 d->static_tr.elsize = 0;
2041 break;
2042 case DMA_SLAVE_BUSWIDTH_2_BYTES:
2043 d->static_tr.elsize = 1;
2044 break;
2045 case DMA_SLAVE_BUSWIDTH_3_BYTES:
2046 d->static_tr.elsize = 2;
2047 break;
2048 case DMA_SLAVE_BUSWIDTH_4_BYTES:
2049 d->static_tr.elsize = 3;
2050 break;
2051 case DMA_SLAVE_BUSWIDTH_8_BYTES:
2052 d->static_tr.elsize = 4;
2053 break;
2054 default: /* not reached */
2055 return -EINVAL;
2056 }
2057
2058 d->static_tr.elcnt = elcnt;
2059
2060 /*
2061 * PDMA must close the packet when the channel is in packet mode.
2062 * In TR mode, when the channel is not cyclic, we also need PDMA to close
2063 * the packet, otherwise the transfer will stall because PDMA holds on to
2064 * the data it has received from the peripheral.
2065 */
2066 if (uc->config.pkt_mode || !uc->cyclic) {
2067 unsigned int div = dev_width * elcnt;
2068
2069 if (uc->cyclic)
2070 d->static_tr.bstcnt = d->residue / d->sglen / div;
2071 else
2072 d->static_tr.bstcnt = d->residue / div;
2073
2074 if (uc->config.dir == DMA_DEV_TO_MEM &&
2075 d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
2076 return -EINVAL;
2077 } else {
2078 d->static_tr.bstcnt = 0;
2079 }
2080
2081 return 0;
2082}
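/*
 * Example of the static TR parameters computed above (illustrative values):
 * a non-cyclic PDMA transfer with a 4-byte bus width (elsize = 3), elcnt = 8
 * and a total residue of 4096 bytes gives bstcnt = 4096 / (4 * 8) = 128.
 * For DEV_TO_MEM the resulting bstcnt is additionally checked against the
 * platform's statictr_z_mask.
 */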
2083
2084static struct udma_desc *
2085udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl,
2086 unsigned int sglen, enum dma_transfer_direction dir,
2087 unsigned long tx_flags, void *context)
2088{
2089 struct scatterlist *sgent;
2090 struct cppi5_host_desc_t *h_desc = NULL;
2091 struct udma_desc *d;
2092 u32 ring_id;
2093 unsigned int i;
2094
2095 d = kzalloc(sizeof(*d) + sglen * sizeof(d->hwdesc[0]), GFP_NOWAIT);
2096 if (!d)
2097 return NULL;
2098
2099 d->sglen = sglen;
2100 d->hwdesc_count = sglen;
2101
2102 if (dir == DMA_DEV_TO_MEM)
2103 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
2104 else
2105 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
2106
2107 for_each_sg(sgl, sgent, sglen, i) {
2108 struct udma_hwdesc *hwdesc = &d->hwdesc[i];
2109 dma_addr_t sg_addr = sg_dma_address(sgent);
2110 struct cppi5_host_desc_t *desc;
2111 size_t sg_len = sg_dma_len(sgent);
2112
2113 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
2114 GFP_NOWAIT,
2115 &hwdesc->cppi5_desc_paddr);
2116 if (!hwdesc->cppi5_desc_vaddr) {
2117 dev_err(uc->ud->dev,
2118 "descriptor%d allocation failed\n", i);
2119
2120 udma_free_hwdesc(uc, d);
2121 kfree(d);
2122 return NULL;
2123 }
2124
2125 d->residue += sg_len;
2126 hwdesc->cppi5_desc_size = uc->config.hdesc_size;
2127 desc = hwdesc->cppi5_desc_vaddr;
2128
2129 if (i == 0) {
2130 cppi5_hdesc_init(desc, 0, 0);
2131 /* Flow and Packet ID */
2132 cppi5_desc_set_pktids(&desc->hdr, uc->id,
2133 CPPI5_INFO1_DESC_FLOWID_DEFAULT);
2134 cppi5_desc_set_retpolicy(&desc->hdr, 0, ring_id);
2135 } else {
2136 cppi5_hdesc_reset_hbdesc(desc);
2137 cppi5_desc_set_retpolicy(&desc->hdr, 0, 0xffff);
2138 }
2139
2140 /* attach the sg buffer to the descriptor */
2141 cppi5_hdesc_attach_buf(desc, sg_addr, sg_len, sg_addr, sg_len);
2142
2143 /* Link this descriptor to the previous one as a host buffer descriptor */
2144 if (h_desc)
2145 cppi5_hdesc_link_hbdesc(h_desc,
2146 hwdesc->cppi5_desc_paddr);
2147
2148 if (dir == DMA_MEM_TO_DEV)
2149 h_desc = desc;
2150 }
2151
2152 if (d->residue >= SZ_4M) {
2153 dev_err(uc->ud->dev,
2154 "%s: Transfer size %u is over the supported 4M range\n",
2155 __func__, d->residue);
2156 udma_free_hwdesc(uc, d);
2157 kfree(d);
2158 return NULL;
2159 }
2160
2161 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
2162 cppi5_hdesc_set_pktlen(h_desc, d->residue);
2163
2164 return d;
2165}
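/*
 * Note on the chain built above: the first host descriptor carries the
 * packet IDs, the completion-ring return policy and the total packet length;
 * for MEM_TO_DEV the remaining SG entries are chained to it as linked host
 * buffer descriptors. The overall packet is limited to less than 4M bytes.
 */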
2166
2167static int udma_attach_metadata(struct dma_async_tx_descriptor *desc,
2168 void *data, size_t len)
2169{
2170 struct udma_desc *d = to_udma_desc(desc);
2171 struct udma_chan *uc = to_udma_chan(desc->chan);
2172 struct cppi5_host_desc_t *h_desc;
2173 u32 psd_size = len;
2174 u32 flags = 0;
2175
2176 if (!uc->config.pkt_mode || !uc->config.metadata_size)
2177 return -ENOTSUPP;
2178
2179 if (!data || len > uc->config.metadata_size)
2180 return -EINVAL;
2181
2182 if (uc->config.needs_epib && len < CPPI5_INFO0_HDESC_EPIB_SIZE)
2183 return -EINVAL;
2184
2185 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
2186 if (d->dir == DMA_MEM_TO_DEV)
2187 memcpy(h_desc->epib, data, len);
2188
2189 if (uc->config.needs_epib)
2190 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
2191
2192 d->metadata = data;
2193 d->metadata_size = len;
2194 if (uc->config.needs_epib)
2195 flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
2196
2197 cppi5_hdesc_update_flags(h_desc, flags);
2198 cppi5_hdesc_update_psdata_size(h_desc, psd_size);
2199
2200 return 0;
2201}
2202
2203static void *udma_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
2204 size_t *payload_len, size_t *max_len)
2205{
2206 struct udma_desc *d = to_udma_desc(desc);
2207 struct udma_chan *uc = to_udma_chan(desc->chan);
2208 struct cppi5_host_desc_t *h_desc;
2209
2210 if (!uc->config.pkt_mode || !uc->config.metadata_size)
2211 return ERR_PTR(-ENOTSUPP);
2212
2213 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
2214
2215 *max_len = uc->config.metadata_size;
2216
2217 *payload_len = cppi5_hdesc_epib_present(&h_desc->hdr) ?
2218 CPPI5_INFO0_HDESC_EPIB_SIZE : 0;
2219 *payload_len += cppi5_hdesc_get_psdata_size(h_desc);
2220
2221 return h_desc->epib;
2222}
2223
2224static int udma_set_metadata_len(struct dma_async_tx_descriptor *desc,
2225 size_t payload_len)
2226{
2227 struct udma_desc *d = to_udma_desc(desc);
2228 struct udma_chan *uc = to_udma_chan(desc->chan);
2229 struct cppi5_host_desc_t *h_desc;
2230 u32 psd_size = payload_len;
2231 u32 flags = 0;
2232
2233 if (!uc->config.pkt_mode || !uc->config.metadata_size)
2234 return -ENOTSUPP;
2235
2236 if (payload_len > uc->config.metadata_size)
2237 return -EINVAL;
2238
2239 if (uc->config.needs_epib && payload_len < CPPI5_INFO0_HDESC_EPIB_SIZE)
2240 return -EINVAL;
2241
2242 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
2243
2244 if (uc->config.needs_epib) {
2245 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
2246 flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
2247 }
2248
2249 cppi5_hdesc_update_flags(h_desc, flags);
2250 cppi5_hdesc_update_psdata_size(h_desc, psd_size);
2251
2252 return 0;
2253}
2254
2255static struct dma_descriptor_metadata_ops metadata_ops = {
2256 .attach = udma_attach_metadata,
2257 .get_ptr = udma_get_metadata_ptr,
2258 .set_len = udma_set_metadata_len,
2259};
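/*
 * Minimal sketch of how a client reaches the ops above through the core
 * dmaengine metadata helpers (illustrative only; error handling and the
 * actual EPIB/PS payload contents are omitted):
 *
 *	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV, flags);
 *	dmaengine_desc_attach_metadata(desc, mdata_buf, mdata_len);
 *	dmaengine_submit(desc);
 *
 * On the RX side the client would instead call
 * dmaengine_desc_get_metadata_ptr() after completion to read back the
 * EPIB/PS data fetched by udma_fetch_epib().
 */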
2260
2261static struct dma_async_tx_descriptor *
2262udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
2263 unsigned int sglen, enum dma_transfer_direction dir,
2264 unsigned long tx_flags, void *context)
2265{
2266 struct udma_chan *uc = to_udma_chan(chan);
2267 enum dma_slave_buswidth dev_width;
2268 struct udma_desc *d;
2269 u32 burst;
2270
2271 if (dir != uc->config.dir) {
2272 dev_err(chan->device->dev,
2273 "%s: chan%d is for %s, not supporting %s\n",
2274 __func__, uc->id,
2275 dmaengine_get_direction_text(uc->config.dir),
2276 dmaengine_get_direction_text(dir));
2277 return NULL;
2278 }
2279
2280 if (dir == DMA_DEV_TO_MEM) {
2281 dev_width = uc->cfg.src_addr_width;
2282 burst = uc->cfg.src_maxburst;
2283 } else if (dir == DMA_MEM_TO_DEV) {
2284 dev_width = uc->cfg.dst_addr_width;
2285 burst = uc->cfg.dst_maxburst;
2286 } else {
2287 dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
2288 return NULL;
2289 }
2290
2291 if (!burst)
2292 burst = 1;
2293
2294 if (uc->config.pkt_mode)
2295 d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags,
2296 context);
2297 else
2298 d = udma_prep_slave_sg_tr(uc, sgl, sglen, dir, tx_flags,
2299 context);
2300
2301 if (!d)
2302 return NULL;
2303
2304 d->dir = dir;
2305 d->desc_idx = 0;
2306 d->tr_idx = 0;
2307
2308 /* static TR for remote PDMA */
2309 if (udma_configure_statictr(uc, d, dev_width, burst)) {
2310 dev_err(uc->ud->dev,
2311 "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
2312 __func__, d->static_tr.bstcnt);
2313
2314 udma_free_hwdesc(uc, d);
2315 kfree(d);
2316 return NULL;
2317 }
2318
2319 if (uc->config.metadata_size)
2320 d->vd.tx.metadata_ops = &metadata_ops;
2321
2322 return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
2323}
2324
2325static struct udma_desc *
2326udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
2327 size_t buf_len, size_t period_len,
2328 enum dma_transfer_direction dir, unsigned long flags)
2329{
2330 enum dma_slave_buswidth dev_width;
2331 struct udma_desc *d;
2332 size_t tr_size;
2333 struct cppi5_tr_type1_t *tr_req;
2334 unsigned int i;
2335 unsigned int periods = buf_len / period_len;
2336 u32 burst;
2337
2338 if (dir == DMA_DEV_TO_MEM) {
2339 dev_width = uc->cfg.src_addr_width;
2340 burst = uc->cfg.src_maxburst;
2341 } else if (dir == DMA_MEM_TO_DEV) {
2342 dev_width = uc->cfg.dst_addr_width;
2343 burst = uc->cfg.dst_maxburst;
2344 } else {
2345 dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
2346 return NULL;
2347 }
2348
2349 if (!burst)
2350 burst = 1;
2351
2352 /* Now allocate and setup the descriptor. */
2353 tr_size = sizeof(struct cppi5_tr_type1_t);
2354 d = udma_alloc_tr_desc(uc, tr_size, periods, dir);
2355 if (!d)
2356 return NULL;
2357
2358 tr_req = d->hwdesc[0].tr_req_base;
2359 for (i = 0; i < periods; i++) {
2360 cppi5_tr_init(&tr_req[i].flags, CPPI5_TR_TYPE1, false, false,
2361 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2362
2363 tr_req[i].addr = buf_addr + period_len * i;
2364 tr_req[i].icnt0 = dev_width;
2365 tr_req[i].icnt1 = period_len / dev_width;
2366 tr_req[i].dim1 = dev_width;
2367
2368 if (!(flags & DMA_PREP_INTERRUPT))
2369 cppi5_tr_csf_set(&tr_req[i].flags,
2370 CPPI5_TR_CSF_SUPR_EVT);
2371 }
2372
2373 return d;
2374}
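/*
 * Example for the cyclic TR setup above (illustrative numbers): an 8 KiB
 * buffer with a 2 KiB period and a 4-byte bus width yields 4 TRs, each with
 * icnt0 = dim1 = 4 and icnt1 = 512; per-period events are only generated
 * when DMA_PREP_INTERRUPT is set, otherwise they are suppressed.
 */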
2375
2376static struct udma_desc *
2377udma_prep_dma_cyclic_pkt(struct udma_chan *uc, dma_addr_t buf_addr,
2378 size_t buf_len, size_t period_len,
2379 enum dma_transfer_direction dir, unsigned long flags)
2380{
2381 struct udma_desc *d;
2382 u32 ring_id;
2383 int i;
2384 int periods = buf_len / period_len;
2385
2386 if (periods > (K3_UDMA_DEFAULT_RING_SIZE - 1))
2387 return NULL;
2388
2389 if (period_len >= SZ_4M)
2390 return NULL;
2391
2392 d = kzalloc(sizeof(*d) + periods * sizeof(d->hwdesc[0]), GFP_NOWAIT);
2393 if (!d)
2394 return NULL;
2395
2396 d->hwdesc_count = periods;
2397
2398 /* TODO: re-check this... */
2399 if (dir == DMA_DEV_TO_MEM)
2400 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
2401 else
2402 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
2403
2404 for (i = 0; i < periods; i++) {
2405 struct udma_hwdesc *hwdesc = &d->hwdesc[i];
2406 dma_addr_t period_addr = buf_addr + (period_len * i);
2407 struct cppi5_host_desc_t *h_desc;
2408
2409 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
2410 GFP_NOWAIT,
2411 &hwdesc->cppi5_desc_paddr);
2412 if (!hwdesc->cppi5_desc_vaddr) {
2413 dev_err(uc->ud->dev,
2414 "descriptor%d allocation failed\n", i);
2415
2416 udma_free_hwdesc(uc, d);
2417 kfree(d);
2418 return NULL;
2419 }
2420
2421 hwdesc->cppi5_desc_size = uc->config.hdesc_size;
2422 h_desc = hwdesc->cppi5_desc_vaddr;
2423
2424 cppi5_hdesc_init(h_desc, 0, 0);
2425 cppi5_hdesc_set_pktlen(h_desc, period_len);
2426
2427 /* Flow and Packet ID */
2428 cppi5_desc_set_pktids(&h_desc->hdr, uc->id,
2429 CPPI5_INFO1_DESC_FLOWID_DEFAULT);
2430 cppi5_desc_set_retpolicy(&h_desc->hdr, 0, ring_id);
2431
2432 /* attach each period to a new descriptor */
2433 cppi5_hdesc_attach_buf(h_desc,
2434 period_addr, period_len,
2435 period_addr, period_len);
2436 }
2437
2438 return d;
2439}
2440
2441static struct dma_async_tx_descriptor *
2442udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
2443 size_t period_len, enum dma_transfer_direction dir,
2444 unsigned long flags)
2445{
2446 struct udma_chan *uc = to_udma_chan(chan);
2447 enum dma_slave_buswidth dev_width;
2448 struct udma_desc *d;
2449 u32 burst;
2450
2451 if (dir != uc->config.dir) {
2452 dev_err(chan->device->dev,
2453 "%s: chan%d is for %s, not supporting %s\n",
2454 __func__, uc->id,
2455 dmaengine_get_direction_text(uc->config.dir),
2456 dmaengine_get_direction_text(dir));
2457 return NULL;
2458 }
2459
2460 uc->cyclic = true;
2461
2462 if (dir == DMA_DEV_TO_MEM) {
2463 dev_width = uc->cfg.src_addr_width;
2464 burst = uc->cfg.src_maxburst;
2465 } else if (dir == DMA_MEM_TO_DEV) {
2466 dev_width = uc->cfg.dst_addr_width;
2467 burst = uc->cfg.dst_maxburst;
2468 } else {
2469 dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
2470 return NULL;
2471 }
2472
2473 if (!burst)
2474 burst = 1;
2475
2476 if (uc->config.pkt_mode)
2477 d = udma_prep_dma_cyclic_pkt(uc, buf_addr, buf_len, period_len,
2478 dir, flags);
2479 else
2480 d = udma_prep_dma_cyclic_tr(uc, buf_addr, buf_len, period_len,
2481 dir, flags);
2482
2483 if (!d)
2484 return NULL;
2485
2486 d->sglen = buf_len / period_len;
2487
2488 d->dir = dir;
2489 d->residue = buf_len;
2490
2491 /* static TR for remote PDMA */
2492 if (udma_configure_statictr(uc, d, dev_width, burst)) {
2493 dev_err(uc->ud->dev,
2494 "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
2495 __func__, d->static_tr.bstcnt);
2496
2497 udma_free_hwdesc(uc, d);
2498 kfree(d);
2499 return NULL;
2500 }
2501
2502 if (uc->config.metadata_size)
2503 d->vd.tx.metadata_ops = &metadata_ops;
2504
2505 return vchan_tx_prep(&uc->vc, &d->vd, flags);
2506}
2507
2508static struct dma_async_tx_descriptor *
2509udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
2510 size_t len, unsigned long tx_flags)
2511{
2512 struct udma_chan *uc = to_udma_chan(chan);
2513 struct udma_desc *d;
2514 struct cppi5_tr_type15_t *tr_req;
2515 int num_tr;
2516 size_t tr_size = sizeof(struct cppi5_tr_type15_t);
2517 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
2518
2519 if (uc->config.dir != DMA_MEM_TO_MEM) {
2520 dev_err(chan->device->dev,
2521 "%s: chan%d is for %s, not supporting %s\n",
2522 __func__, uc->id,
2523 dmaengine_get_direction_text(uc->config.dir),
2524 dmaengine_get_direction_text(DMA_MEM_TO_MEM));
2525 return NULL;
2526 }
2527
2528 if (len < SZ_64K) {
2529 num_tr = 1;
2530 tr0_cnt0 = len;
2531 tr0_cnt1 = 1;
2532 } else {
2533 unsigned long align_to = __ffs(src | dest);
2534
2535 if (align_to > 3)
2536 align_to = 3;
2537 /*
2538 * Keep it simple: tr0 moves blocks of (SZ_64K - alignment) bytes,
2539 * tr1 moves the remainder.
2540 */
2541 num_tr = 2;
2542 tr0_cnt0 = (SZ_64K - BIT(align_to));
2543 if (len / tr0_cnt0 >= SZ_64K) {
2544 dev_err(uc->ud->dev, "size %zu is not supported\n",
2545 len);
2546 return NULL;
2547 }
2548
2549 tr0_cnt1 = len / tr0_cnt0;
2550 tr1_cnt0 = len % tr0_cnt0;
2551 }
2552
2553 d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM);
2554 if (!d)
2555 return NULL;
2556
2557 d->dir = DMA_MEM_TO_MEM;
2558 d->desc_idx = 0;
2559 d->tr_idx = 0;
2560 d->residue = len;
2561
2562 tr_req = d->hwdesc[0].tr_req_base;
2563
2564 cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
2565 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2566 cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);
2567
2568 tr_req[0].addr = src;
2569 tr_req[0].icnt0 = tr0_cnt0;
2570 tr_req[0].icnt1 = tr0_cnt1;
2571 tr_req[0].icnt2 = 1;
2572 tr_req[0].icnt3 = 1;
2573 tr_req[0].dim1 = tr0_cnt0;
2574
2575 tr_req[0].daddr = dest;
2576 tr_req[0].dicnt0 = tr0_cnt0;
2577 tr_req[0].dicnt1 = tr0_cnt1;
2578 tr_req[0].dicnt2 = 1;
2579 tr_req[0].dicnt3 = 1;
2580 tr_req[0].ddim1 = tr0_cnt0;
2581
2582 if (num_tr == 2) {
2583 cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
2584 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2585 cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);
2586
2587 tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
2588 tr_req[1].icnt0 = tr1_cnt0;
2589 tr_req[1].icnt1 = 1;
2590 tr_req[1].icnt2 = 1;
2591 tr_req[1].icnt3 = 1;
2592
2593 tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
2594 tr_req[1].dicnt0 = tr1_cnt0;
2595 tr_req[1].dicnt1 = 1;
2596 tr_req[1].dicnt2 = 1;
2597 tr_req[1].dicnt3 = 1;
2598 }
2599
2600 cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, CPPI5_TR_CSF_EOP);
2601
2602 if (uc->config.metadata_size)
2603 d->vd.tx.metadata_ops = &metadata_ops;
2604
2605 return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
2606}
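/*
 * Worked example of the TR15 split above (illustrative numbers): copying
 * len = 200000 bytes between buffers aligned to at least 8 bytes gives an
 * effective align_to of 3 (capped), so tr0_cnt0 = SZ_64K - 8 = 65528,
 * tr0_cnt1 = 200000 / 65528 = 3 and tr1_cnt0 = 200000 % 65528 = 3416,
 * i.e. tr0 moves three 65528-byte rows and tr1 moves the 3416-byte remainder.
 */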
2607
2608static void udma_issue_pending(struct dma_chan *chan)
2609{
2610 struct udma_chan *uc = to_udma_chan(chan);
2611 unsigned long flags;
2612
2613 spin_lock_irqsave(&uc->vc.lock, flags);
2614
2615 /* If something is pending and there is no active descriptor: */
2616 if (vchan_issue_pending(&uc->vc) && !uc->desc) {
2617 /*
2618 * start a descriptor if the channel is NOT [marked as
2619 * terminating _and_ it is still running (teardown has not
2620 * completed yet)].
2621 */
2622 if (!(uc->state == UDMA_CHAN_IS_TERMINATING &&
2623 udma_is_chan_running(uc)))
2624 udma_start(uc);
2625 }
2626
2627 spin_unlock_irqrestore(&uc->vc.lock, flags);
2628}
2629
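/*
 * Residue reporting: for an in-flight descriptor the remaining byte count is
 * estimated from the channel realtime byte counters (SBCNT/BCNT and, for
 * non-native PSI-L endpoints, the peer BCNT), relative to the bytes already
 * accounted in uc->bcnt; the difference between the local and peer counters
 * is reported as in-flight bytes via dma_set_in_flight_bytes().
 */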
2630static enum dma_status udma_tx_status(struct dma_chan *chan,
2631 dma_cookie_t cookie,
2632 struct dma_tx_state *txstate)
2633{
2634 struct udma_chan *uc = to_udma_chan(chan);
2635 enum dma_status ret;
2636 unsigned long flags;
2637
2638 spin_lock_irqsave(&uc->vc.lock, flags);
2639
2640 ret = dma_cookie_status(chan, cookie, txstate);
2641
2642 if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc))
2643 ret = DMA_PAUSED;
2644
2645 if (ret == DMA_COMPLETE || !txstate)
2646 goto out;
2647
2648 if (uc->desc && uc->desc->vd.tx.cookie == cookie) {
2649 u32 peer_bcnt = 0;
2650 u32 bcnt = 0;
2651 u32 residue = uc->desc->residue;
2652 u32 delay = 0;
2653
2654 if (uc->desc->dir == DMA_MEM_TO_DEV) {
2655 bcnt = udma_tchanrt_read(uc->tchan,
2656 UDMA_TCHAN_RT_SBCNT_REG);
2657
2658 if (uc->config.ep_type != PSIL_EP_NATIVE) {
2659 peer_bcnt = udma_tchanrt_read(uc->tchan,
2660 UDMA_TCHAN_RT_PEER_BCNT_REG);
2661
2662 if (bcnt > peer_bcnt)
2663 delay = bcnt - peer_bcnt;
2664 }
2665 } else if (uc->desc->dir == DMA_DEV_TO_MEM) {
2666 bcnt = udma_rchanrt_read(uc->rchan,
2667 UDMA_RCHAN_RT_BCNT_REG);
2668
2669 if (uc->config.ep_type != PSIL_EP_NATIVE) {
2670 peer_bcnt = udma_rchanrt_read(uc->rchan,
2671 UDMA_RCHAN_RT_PEER_BCNT_REG);
2672
2673 if (peer_bcnt > bcnt)
2674 delay = peer_bcnt - bcnt;
2675 }
2676 } else {
2677 bcnt = udma_tchanrt_read(uc->tchan,
2678 UDMA_TCHAN_RT_BCNT_REG);
2679 }
2680
2681 bcnt -= uc->bcnt;
2682 if (bcnt && !(bcnt % uc->desc->residue))
2683 residue = 0;
2684 else
2685 residue -= bcnt % uc->desc->residue;
2686
2687 if (!residue && (uc->config.dir == DMA_DEV_TO_MEM || !delay)) {
2688 ret = DMA_COMPLETE;
2689 delay = 0;
2690 }
2691
2692 dma_set_residue(txstate, residue);
2693 dma_set_in_flight_bytes(txstate, delay);
2694
2695 } else {
2696 ret = DMA_COMPLETE;
2697 }
2698
2699out:
2700 spin_unlock_irqrestore(&uc->vc.lock, flags);
2701 return ret;
2702}
2703
2704static int udma_pause(struct dma_chan *chan)
2705{
2706 struct udma_chan *uc = to_udma_chan(chan);
2707
2708 if (!uc->desc)
2709 return -EINVAL;
2710
2711 /* pause the channel */
2712 switch (uc->desc->dir) {
2713 case DMA_DEV_TO_MEM:
2714 udma_rchanrt_update_bits(uc->rchan,
2715 UDMA_RCHAN_RT_PEER_RT_EN_REG,
2716 UDMA_PEER_RT_EN_PAUSE,
2717 UDMA_PEER_RT_EN_PAUSE);
2718 break;
2719 case DMA_MEM_TO_DEV:
2720 udma_tchanrt_update_bits(uc->tchan,
2721 UDMA_TCHAN_RT_PEER_RT_EN_REG,
2722 UDMA_PEER_RT_EN_PAUSE,
2723 UDMA_PEER_RT_EN_PAUSE);
2724 break;
2725 case DMA_MEM_TO_MEM:
2726 udma_tchanrt_update_bits(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
2727 UDMA_CHAN_RT_CTL_PAUSE,
2728 UDMA_CHAN_RT_CTL_PAUSE);
2729 break;
2730 default:
2731 return -EINVAL;
2732 }
2733
2734 return 0;
2735}
2736
2737static int udma_resume(struct dma_chan *chan)
2738{
2739 struct udma_chan *uc = to_udma_chan(chan);
2740
2741 if (!uc->desc)
2742 return -EINVAL;
2743
2744 /* resume the channel */
2745 switch (uc->desc->dir) {
2746 case DMA_DEV_TO_MEM:
2747 udma_rchanrt_update_bits(uc->rchan,
2748 UDMA_RCHAN_RT_PEER_RT_EN_REG,
2749 UDMA_PEER_RT_EN_PAUSE, 0);
2750
2751 break;
2752 case DMA_MEM_TO_DEV:
2753 udma_tchanrt_update_bits(uc->tchan,
2754 UDMA_TCHAN_RT_PEER_RT_EN_REG,
2755 UDMA_PEER_RT_EN_PAUSE, 0);
2756 break;
2757 case DMA_MEM_TO_MEM:
2758 udma_tchanrt_update_bits(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
2759 UDMA_CHAN_RT_CTL_PAUSE, 0);
2760 break;
2761 default:
2762 return -EINVAL;
2763 }
2764
2765 return 0;
2766}
2767
2768static int udma_terminate_all(struct dma_chan *chan)
2769{
2770 struct udma_chan *uc = to_udma_chan(chan);
2771 unsigned long flags;
2772 LIST_HEAD(head);
2773
2774 spin_lock_irqsave(&uc->vc.lock, flags);
2775
2776 if (udma_is_chan_running(uc))
2777 udma_stop(uc);
2778
2779 if (uc->desc) {
2780 uc->terminated_desc = uc->desc;
2781 uc->desc = NULL;
2782 uc->terminated_desc->terminated = true;
2783 cancel_delayed_work(&uc->tx_drain.work);
2784 }
2785
2786 uc->paused = false;
2787
2788 vchan_get_all_descriptors(&uc->vc, &head);
2789 spin_unlock_irqrestore(&uc->vc.lock, flags);
2790 vchan_dma_desc_free_list(&uc->vc, &head);
2791
2792 return 0;
2793}
2794
2795static void udma_synchronize(struct dma_chan *chan)
2796{
2797 struct udma_chan *uc = to_udma_chan(chan);
2798 unsigned long timeout = msecs_to_jiffies(1000);
2799
2800 vchan_synchronize(&uc->vc);
2801
2802 if (uc->state == UDMA_CHAN_IS_TERMINATING) {
2803 timeout = wait_for_completion_timeout(&uc->teardown_completed,
2804 timeout);
2805 if (!timeout) {
2806 dev_warn(uc->ud->dev, "chan%d teardown timeout!\n",
2807 uc->id);
2808 udma_dump_chan_stdata(uc);
2809 udma_reset_chan(uc, true);
2810 }
2811 }
2812
2813 udma_reset_chan(uc, false);
2814 if (udma_is_chan_running(uc))
2815 dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id);
2816
2817 cancel_delayed_work_sync(&uc->tx_drain.work);
2818 udma_reset_rings(uc);
2819}
2820
2821static void udma_desc_pre_callback(struct virt_dma_chan *vc,
2822 struct virt_dma_desc *vd,
2823 struct dmaengine_result *result)
2824{
2825 struct udma_chan *uc = to_udma_chan(&vc->chan);
2826 struct udma_desc *d;
2827
2828 if (!vd)
2829 return;
2830
2831 d = to_udma_desc(&vd->tx);
2832
2833 if (d->metadata_size)
2834 udma_fetch_epib(uc, d);
2835
2836 /* Provide residue information for the client */
2837 if (result) {
2838 void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx);
2839
2840 if (cppi5_desc_get_type(desc_vaddr) ==
2841 CPPI5_INFO0_DESC_TYPE_VAL_HOST) {
2842 result->residue = d->residue -
2843 cppi5_hdesc_get_pktlen(desc_vaddr);
2844 if (result->residue)
2845 result->result = DMA_TRANS_ABORTED;
2846 else
2847 result->result = DMA_TRANS_NOERROR;
2848 } else {
2849 result->residue = 0;
2850 result->result = DMA_TRANS_NOERROR;
2851 }
2852 }
2853}
2854
2855/*
2856 * This tasklet handles the completion of a DMA descriptor by
2857 * calling its callback and freeing it.
2858 */
2859static void udma_vchan_complete(unsigned long arg)
2860{
2861 struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
2862 struct virt_dma_desc *vd, *_vd;
2863 struct dmaengine_desc_callback cb;
2864 LIST_HEAD(head);
2865
2866 spin_lock_irq(&vc->lock);
2867 list_splice_tail_init(&vc->desc_completed, &head);
2868 vd = vc->cyclic;
2869 if (vd) {
2870 vc->cyclic = NULL;
2871 dmaengine_desc_get_callback(&vd->tx, &cb);
2872 } else {
2873 memset(&cb, 0, sizeof(cb));
2874 }
2875 spin_unlock_irq(&vc->lock);
2876
2877 udma_desc_pre_callback(vc, vd, NULL);
2878 dmaengine_desc_callback_invoke(&cb, NULL);
2879
2880 list_for_each_entry_safe(vd, _vd, &head, node) {
2881 struct dmaengine_result result;
2882
2883 dmaengine_desc_get_callback(&vd->tx, &cb);
2884
2885 list_del(&vd->node);
2886
2887 udma_desc_pre_callback(vc, vd, &result);
2888 dmaengine_desc_callback_invoke(&cb, &result);
2889
2890 vchan_vdesc_fini(vd);
2891 }
2892}
2893
2894static void udma_free_chan_resources(struct dma_chan *chan)
2895{
2896 struct udma_chan *uc = to_udma_chan(chan);
2897 struct udma_dev *ud = to_udma_dev(chan->device);
2898
2899 udma_terminate_all(chan);
2900 if (uc->terminated_desc) {
2901 udma_reset_chan(uc, false);
2902 udma_reset_rings(uc);
2903 }
2904
2905 cancel_delayed_work_sync(&uc->tx_drain.work);
2906 destroy_delayed_work_on_stack(&uc->tx_drain.work);
2907
2908 if (uc->irq_num_ring > 0) {
2909 free_irq(uc->irq_num_ring, uc);
2910
2911 uc->irq_num_ring = 0;
2912 }
2913 if (uc->irq_num_udma > 0) {
2914 free_irq(uc->irq_num_udma, uc);
2915
2916 uc->irq_num_udma = 0;
2917 }
2918
2919 /* Release PSI-L pairing */
2920 if (uc->psil_paired) {
2921 navss_psil_unpair(ud, uc->config.src_thread,
2922 uc->config.dst_thread);
2923 uc->psil_paired = false;
2924 }
2925
2926 vchan_free_chan_resources(&uc->vc);
2927 tasklet_kill(&uc->vc.task);
2928
2929 udma_free_tx_resources(uc);
2930 udma_free_rx_resources(uc);
2931 udma_reset_uchan(uc);
2932
2933 if (uc->use_dma_pool) {
2934 dma_pool_destroy(uc->hdesc_pool);
2935 uc->use_dma_pool = false;
2936 }
2937}
2938
2939static struct platform_driver udma_driver;
2940
2941struct udma_filter_param {
2942 int remote_thread_id;
2943 u32 atype;
2944};
2945
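/*
 * dma_filter_fn used by udma_of_xlate(): validates the parameters taken from
 * the dma_spec and translates the PSI-L endpoint configuration returned by
 * psil_get_ep_config() into the channel configuration (direction, packet vs.
 * TR mode, EPIB/metadata sizes) that is used later at channel allocation.
 */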
2946static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
2947{
2948 struct udma_chan_config *ucc;
2949 struct psil_endpoint_config *ep_config;
2950 struct udma_filter_param *filter_param;
2951 struct udma_chan *uc;
2952 struct udma_dev *ud;
2953
2954 if (chan->device->dev->driver != &udma_driver.driver)
2955 return false;
2956
2957 uc = to_udma_chan(chan);
2958 ucc = &uc->config;
2959 ud = uc->ud;
2960 filter_param = param;
2961
2962 if (filter_param->atype > 2) {
2963 dev_err(ud->dev, "Invalid channel atype: %u\n",
2964 filter_param->atype);
2965 return false;
2966 }
2967
2968 ucc->remote_thread_id = filter_param->remote_thread_id;
2969 ucc->atype = filter_param->atype;
2970
2971 if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)
2972 ucc->dir = DMA_MEM_TO_DEV;
2973 else
2974 ucc->dir = DMA_DEV_TO_MEM;
2975
2976 ep_config = psil_get_ep_config(ucc->remote_thread_id);
2977 if (IS_ERR(ep_config)) {
2978 dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n",
2979 ucc->remote_thread_id);
2980 ucc->dir = DMA_MEM_TO_MEM;
2981 ucc->remote_thread_id = -1;
2982 ucc->atype = 0;
2983 return false;
2984 }
2985
2986 ucc->pkt_mode = ep_config->pkt_mode;
2987 ucc->channel_tpl = ep_config->channel_tpl;
2988 ucc->notdpkt = ep_config->notdpkt;
2989 ucc->ep_type = ep_config->ep_type;
2990
2991 if (ucc->ep_type != PSIL_EP_NATIVE) {
2992 const struct udma_match_data *match_data = ud->match_data;
2993
2994 if (match_data->flags & UDMA_FLAG_PDMA_ACC32)
2995 ucc->enable_acc32 = ep_config->pdma_acc32;
2996 if (match_data->flags & UDMA_FLAG_PDMA_BURST)
2997 ucc->enable_burst = ep_config->pdma_burst;
2998 }
2999
3000 ucc->needs_epib = ep_config->needs_epib;
3001 ucc->psd_size = ep_config->psd_size;
3002 ucc->metadata_size =
3003 (ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) +
3004 ucc->psd_size;
3005
3006 if (ucc->pkt_mode)
3007 ucc->hdesc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
3008 ucc->metadata_size, ud->desc_align);
3009
3010 dev_dbg(ud->dev, "chan%d: Remote thread: 0x%04x (%s)\n", uc->id,
3011 ucc->remote_thread_id, dmaengine_get_direction_text(ucc->dir));
3012
3013 return true;
3014}
3015
3016static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
3017 struct of_dma *ofdma)
3018{
3019 struct udma_dev *ud = ofdma->of_dma_data;
3020 dma_cap_mask_t mask = ud->ddev.cap_mask;
3021 struct udma_filter_param filter_param;
3022 struct dma_chan *chan;
3023
3024 if (dma_spec->args_count != 1 && dma_spec->args_count != 2)
3025 return NULL;
3026
3027 filter_param.remote_thread_id = dma_spec->args[0];
3028 if (dma_spec->args_count == 2)
3029 filter_param.atype = dma_spec->args[1];
3030 else
3031 filter_param.atype = 0;
3032
3033 chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param,
3034 ofdma->of_node);
3035 if (!chan) {
3036 dev_err(ud->dev, "failed to get a channel in %s\n", __func__);
3037 return ERR_PTR(-EINVAL);
3038 }
3039
3040 return chan;
3041}
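/*
 * Illustrative consumer binding for the xlate above (the thread IDs are
 * example values only; the real ones come from the SoC's PSI-L thread map):
 *
 *	dmas = <&main_udmap 0xc400>, <&main_udmap 0x4400>;
 *	dma-names = "tx", "rx";
 *
 * The first (mandatory) cell is the remote PSI-L thread ID; destination
 * threads have the K3_PSIL_DST_THREAD_ID_OFFSET bit set and map to
 * MEM_TO_DEV. The optional second cell selects the atype (0..2).
 */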
3042
3043static struct udma_match_data am654_main_data = {
3044 .psil_base = 0x1000,
3045 .enable_memcpy_support = true,
3046 .statictr_z_mask = GENMASK(11, 0),
3047 .rchan_oes_offset = 0x2000,
3048 .tpl_levels = 2,
3049 .level_start_idx = {
3050 [0] = 8, /* Normal channels */
3051 [1] = 0, /* High Throughput channels */
3052 },
3053};
3054
3055static struct udma_match_data am654_mcu_data = {
3056 .psil_base = 0x6000,
3057 .enable_memcpy_support = true, /* TEST: DMA domains */
3058 .statictr_z_mask = GENMASK(11, 0),
3059 .rchan_oes_offset = 0x2000,
3060 .tpl_levels = 2,
3061 .level_start_idx = {
3062 [0] = 2, /* Normal channels */
3063 [1] = 0, /* High Throughput channels */
3064 },
3065};
3066
3067static struct udma_match_data j721e_main_data = {
3068 .psil_base = 0x1000,
3069 .enable_memcpy_support = true,
3070 .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
3071 .statictr_z_mask = GENMASK(23, 0),
3072 .rchan_oes_offset = 0x400,
3073 .tpl_levels = 3,
3074 .level_start_idx = {
3075 [0] = 16, /* Normal channels */
3076 [1] = 4, /* High Throughput channels */
3077 [2] = 0, /* Ultra High Throughput channels */
3078 },
3079};
3080
3081static struct udma_match_data j721e_mcu_data = {
3082 .psil_base = 0x6000,
3083 .enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */
3084 .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
3085 .statictr_z_mask = GENMASK(23, 0),
3086 .rchan_oes_offset = 0x400,
3087 .tpl_levels = 2,
3088 .level_start_idx = {
3089 [0] = 2, /* Normal channels */
3090 [1] = 0, /* High Throughput channels */
3091 },
3092};
3093
3094static const struct of_device_id udma_of_match[] = {
3095 {
3096 .compatible = "ti,am654-navss-main-udmap",
3097 .data = &am654_main_data,
3098 },
3099 {
3100 .compatible = "ti,am654-navss-mcu-udmap",
3101 .data = &am654_mcu_data,
3102 }, {
3103 .compatible = "ti,j721e-navss-main-udmap",
3104 .data = &j721e_main_data,
3105 }, {
3106 .compatible = "ti,j721e-navss-mcu-udmap",
3107 .data = &j721e_mcu_data,
3108 },
3109 { /* Sentinel */ },
3110};
3111
3112static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
3113{
3114 struct resource *res;
3115 int i;
3116
3117 for (i = 0; i < MMR_LAST; i++) {
3118 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
3119 mmr_names[i]);
3120 ud->mmrs[i] = devm_ioremap_resource(&pdev->dev, res);
3121 if (IS_ERR(ud->mmrs[i]))
3122 return PTR_ERR(ud->mmrs[i]);
3123 }
3124
3125 return 0;
3126}
3127
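/*
 * udma_setup_resources() below discovers the channel and flow counts from
 * the GCFG CAP2/CAP3 registers, builds the tchan/rchan/rflow bitmaps from
 * the TISCI resource ranges (a set bit means "not usable by this host"),
 * reserves the default rflows that mirror the rchan IDs, and allocates the
 * MSI interrupts for the rings assigned to those channels.
 */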
3128static int udma_setup_resources(struct udma_dev *ud)
3129{
3130 struct device *dev = ud->dev;
3131 int ch_count, ret, i, j;
3132 u32 cap2, cap3;
3133 struct ti_sci_resource_desc *rm_desc;
3134 struct ti_sci_resource *rm_res, irq_res;
3135 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
3136 static const char * const range_names[] = { "ti,sci-rm-range-tchan",
3137 "ti,sci-rm-range-rchan",
3138 "ti,sci-rm-range-rflow" };
3139
3140 cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
3141 cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
3142
3143 ud->rflow_cnt = cap3 & 0x3fff;
3144 ud->tchan_cnt = cap2 & 0x1ff;
3145 ud->echan_cnt = (cap2 >> 9) & 0x1ff;
3146 ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
3147 ch_count = ud->tchan_cnt + ud->rchan_cnt;
3148
3149 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
3150 sizeof(unsigned long), GFP_KERNEL);
3151 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
3152 GFP_KERNEL);
3153 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
3154 sizeof(unsigned long), GFP_KERNEL);
3155 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
3156 GFP_KERNEL);
3157 ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
3158 sizeof(unsigned long),
3159 GFP_KERNEL);
3160 ud->rflow_gp_map_allocated = devm_kcalloc(dev,
3161 BITS_TO_LONGS(ud->rflow_cnt),
3162 sizeof(unsigned long),
3163 GFP_KERNEL);
3164 ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
3165 sizeof(unsigned long),
3166 GFP_KERNEL);
3167 ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
3168 GFP_KERNEL);
3169
3170 if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map ||
3171 !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans ||
3172 !ud->rflows || !ud->rflow_in_use)
3173 return -ENOMEM;
3174
3175 /*
3176 * RX flows with the same IDs as RX channels are reserved to be used
3177 * as default flows if the remote HW can't generate flow IDs. Those
3178 * RX flows can only be requested explicitly, by ID.
3179 */
3180 bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt);
3181
3182 /* by default no GP rflows are assigned to Linux */
3183 bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt);
3184
3185 /* Get resource ranges from tisci */
3186 for (i = 0; i < RM_RANGE_LAST; i++)
3187 tisci_rm->rm_ranges[i] =
3188 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
3189 tisci_rm->tisci_dev_id,
3190 (char *)range_names[i]);
3191
3192 /* tchan ranges */
3193 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
3194 if (IS_ERR(rm_res)) {
3195 bitmap_zero(ud->tchan_map, ud->tchan_cnt);
3196 } else {
3197 bitmap_fill(ud->tchan_map, ud->tchan_cnt);
3198 for (i = 0; i < rm_res->sets; i++) {
3199 rm_desc = &rm_res->desc[i];
3200 bitmap_clear(ud->tchan_map, rm_desc->start,
3201 rm_desc->num);
3202 dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n",
3203 rm_desc->start, rm_desc->num);
3204 }
3205 }
3206 irq_res.sets = rm_res->sets;
3207
3208 /* rchan and matching default flow ranges */
3209 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
3210 if (IS_ERR(rm_res)) {
3211 bitmap_zero(ud->rchan_map, ud->rchan_cnt);
3212 } else {
3213 bitmap_fill(ud->rchan_map, ud->rchan_cnt);
3214 for (i = 0; i < rm_res->sets; i++) {
3215 rm_desc = &rm_res->desc[i];
3216 bitmap_clear(ud->rchan_map, rm_desc->start,
3217 rm_desc->num);
3218 dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n",
3219 rm_desc->start, rm_desc->num);
3220 }
3221 }
3222
3223 irq_res.sets += rm_res->sets;
3224 irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
3225 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
3226 for (i = 0; i < rm_res->sets; i++) {
3227 irq_res.desc[i].start = rm_res->desc[i].start;
3228 irq_res.desc[i].num = rm_res->desc[i].num;
3229 }
3230 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
3231 for (j = 0; j < rm_res->sets; j++, i++) {
3232 irq_res.desc[i].start = rm_res->desc[j].start +
3233 ud->match_data->rchan_oes_offset;
3234 irq_res.desc[i].num = rm_res->desc[j].num;
3235 }
3236 ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
3237 kfree(irq_res.desc);
3238 if (ret) {
3239 dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
3240 return ret;
3241 }
3242
3243 /* GP rflow ranges */
3244 rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
3245 if (IS_ERR(rm_res)) {
3246 /* all gp flows are assigned exclusively to Linux */
3247 bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt,
3248 ud->rflow_cnt - ud->rchan_cnt);
3249 } else {
3250 for (i = 0; i < rm_res->sets; i++) {
3251 rm_desc = &rm_res->desc[i];
3252 bitmap_clear(ud->rflow_gp_map, rm_desc->start,
3253 rm_desc->num);
3254 dev_dbg(dev, "ti-sci-res: rflow: %d:%d\n",
3255 rm_desc->start, rm_desc->num);
3256 }
3257 }
3258
3259 ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
3260 ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
3261 if (!ch_count)
3262 return -ENODEV;
3263
3264 ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
3265 GFP_KERNEL);
3266 if (!ud->channels)
3267 return -ENOMEM;
3268
3269 dev_info(dev, "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
3270 ch_count,
3271 ud->tchan_cnt - bitmap_weight(ud->tchan_map, ud->tchan_cnt),
3272 ud->rchan_cnt - bitmap_weight(ud->rchan_map, ud->rchan_cnt),
3273 ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map,
3274 ud->rflow_cnt));
3275
3276 return ch_count;
3277}
3278
3279#ifdef CONFIG_DEBUG_FS
3280static void udma_dbg_summary_show_chan(struct seq_file *s,
3281 struct dma_chan *chan)
3282{
3283 struct udma_chan *uc = to_udma_chan(chan);
3284 struct udma_chan_config *ucc = &uc->config;
3285
3286 seq_printf(s, " %-13s| %s", dma_chan_name(chan),
3287 chan->dbg_client_name ?: "in-use");
3288 seq_printf(s, " (%s, ", dmaengine_get_direction_text(uc->config.dir));
3289
3290 switch (uc->config.dir) {
3291 case DMA_MEM_TO_MEM:
3292 seq_printf(s, "chan%d pair [0x%04x -> 0x%04x], ", uc->tchan->id,
3293 ucc->src_thread, ucc->dst_thread);
3294 break;
3295 case DMA_DEV_TO_MEM:
3296 seq_printf(s, "rchan%d [0x%04x -> 0x%04x], ", uc->rchan->id,
3297 ucc->src_thread, ucc->dst_thread);
3298 break;
3299 case DMA_MEM_TO_DEV:
3300 seq_printf(s, "tchan%d [0x%04x -> 0x%04x], ", uc->tchan->id,
3301 ucc->src_thread, ucc->dst_thread);
3302 break;
3303 default:
3304 seq_printf(s, ")\n");
3305 return;
3306 }
3307
3308 if (ucc->ep_type == PSIL_EP_NATIVE) {
3309 seq_printf(s, "PSI-L Native");
3310 if (ucc->metadata_size) {
3311 seq_printf(s, "[%s", ucc->needs_epib ? " EPIB" : "");
3312 if (ucc->psd_size)
3313 seq_printf(s, " PSDsize:%u", ucc->psd_size);
3314 seq_printf(s, " ]");
3315 }
3316 } else {
3317 seq_printf(s, "PDMA");
3318 if (ucc->enable_acc32 || ucc->enable_burst)
3319 seq_printf(s, "[%s%s ]",
3320 ucc->enable_acc32 ? " ACC32" : "",
3321 ucc->enable_burst ? " BURST" : "");
3322 }
3323
3324 seq_printf(s, ", %s)\n", ucc->pkt_mode ? "Packet mode" : "TR mode");
3325}
3326
3327static void udma_dbg_summary_show(struct seq_file *s,
3328 struct dma_device *dma_dev)
3329{
3330 struct dma_chan *chan;
3331
3332 list_for_each_entry(chan, &dma_dev->channels, device_node) {
3333 if (chan->client_count)
3334 udma_dbg_summary_show_chan(s, chan);
3335 }
3336}
3337#endif /* CONFIG_DEBUG_FS */
3338
3339#define TI_UDMAC_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
3340 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
3341 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
3342 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
3343 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
3344
3345static int udma_probe(struct platform_device *pdev)
3346{
3347 struct device_node *navss_node = pdev->dev.parent->of_node;
3348 struct device *dev = &pdev->dev;
3349 struct udma_dev *ud;
3350 const struct of_device_id *match;
3351 int i, ret;
3352 int ch_count;
3353
3354 ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48));
3355 if (ret)
3356 dev_err(dev, "failed to set the DMA mask\n");
3357
3358 ud = devm_kzalloc(dev, sizeof(*ud), GFP_KERNEL);
3359 if (!ud)
3360 return -ENOMEM;
3361
3362 ret = udma_get_mmrs(pdev, ud);
3363 if (ret)
3364 return ret;
3365
3366 ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci");
3367 if (IS_ERR(ud->tisci_rm.tisci))
3368 return PTR_ERR(ud->tisci_rm.tisci);
3369
3370 ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id",
3371 &ud->tisci_rm.tisci_dev_id);
3372 if (ret) {
3373 dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
3374 return ret;
3375 }
3376 pdev->id = ud->tisci_rm.tisci_dev_id;
3377
3378 ret = of_property_read_u32(navss_node, "ti,sci-dev-id",
3379 &ud->tisci_rm.tisci_navss_dev_id);
3380 if (ret) {
3381 dev_err(dev, "NAVSS ti,sci-dev-id read failure %d\n", ret);
3382 return ret;
3383 }
3384
3385 ret = of_property_read_u32(navss_node, "ti,udma-atype", &ud->atype);
3386 if (!ret && ud->atype > 2) {
3387 dev_err(dev, "Invalid atype: %u\n", ud->atype);
3388 return -EINVAL;
3389 }
3390
3391 ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops;
3392 ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops;
3393
3394 ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
3395 if (IS_ERR(ud->ringacc))
3396 return PTR_ERR(ud->ringacc);
3397
3398 dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
3399 DOMAIN_BUS_TI_SCI_INTA_MSI);
3400 if (!dev->msi_domain) {
3401 dev_err(dev, "Failed to get MSI domain\n");
3402 return -EPROBE_DEFER;
3403 }
3404
3405 match = of_match_node(udma_of_match, dev->of_node);
3406 if (!match) {
3407 dev_err(dev, "No compatible match found\n");
3408 return -ENODEV;
3409 }
3410 ud->match_data = match->data;
3411
3412 dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask);
3413 dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask);
3414
3415 ud->ddev.device_alloc_chan_resources = udma_alloc_chan_resources;
3416 ud->ddev.device_config = udma_slave_config;
3417 ud->ddev.device_prep_slave_sg = udma_prep_slave_sg;
3418 ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic;
3419 ud->ddev.device_issue_pending = udma_issue_pending;
3420 ud->ddev.device_tx_status = udma_tx_status;
3421 ud->ddev.device_pause = udma_pause;
3422 ud->ddev.device_resume = udma_resume;
3423 ud->ddev.device_terminate_all = udma_terminate_all;
3424 ud->ddev.device_synchronize = udma_synchronize;
3425#ifdef CONFIG_DEBUG_FS
3426 ud->ddev.dbg_summary_show = udma_dbg_summary_show;
3427#endif
3428
3429 ud->ddev.device_free_chan_resources = udma_free_chan_resources;
3430 ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS;
3431 ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS;
3432 ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
3433 ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
3434 ud->ddev.copy_align = DMAENGINE_ALIGN_8_BYTES;
3435 ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT |
3436 DESC_METADATA_ENGINE;
3437 if (ud->match_data->enable_memcpy_support) {
3438 dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask);
3439 ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy;
3440 ud->ddev.directions |= BIT(DMA_MEM_TO_MEM);
3441 }
3442
3443 ud->ddev.dev = dev;
3444 ud->dev = dev;
3445 ud->psil_base = ud->match_data->psil_base;
3446
3447 INIT_LIST_HEAD(&ud->ddev.channels);
3448 INIT_LIST_HEAD(&ud->desc_to_purge);
3449
3450 ch_count = udma_setup_resources(ud);
3451 if (ch_count <= 0)
3452 return ch_count;
3453
3454 spin_lock_init(&ud->lock);
3455 INIT_WORK(&ud->purge_work, udma_purge_desc_work);
3456
3457 ud->desc_align = 64;
3458 if (ud->desc_align < dma_get_cache_alignment())
3459 ud->desc_align = dma_get_cache_alignment();
3460
3461 for (i = 0; i < ud->tchan_cnt; i++) {
3462 struct udma_tchan *tchan = &ud->tchans[i];
3463
3464 tchan->id = i;
3465 tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000;
3466 }
3467
3468 for (i = 0; i < ud->rchan_cnt; i++) {
3469 struct udma_rchan *rchan = &ud->rchans[i];
3470
3471 rchan->id = i;
3472 rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000;
3473 }
3474
3475 for (i = 0; i < ud->rflow_cnt; i++) {
3476 struct udma_rflow *rflow = &ud->rflows[i];
3477
3478 rflow->id = i;
3479 }
3480
3481 for (i = 0; i < ch_count; i++) {
3482 struct udma_chan *uc = &ud->channels[i];
3483
3484 uc->ud = ud;
3485 uc->vc.desc_free = udma_desc_free;
3486 uc->id = i;
3487 uc->tchan = NULL;
3488 uc->rchan = NULL;
3489 uc->config.remote_thread_id = -1;
3490 uc->config.dir = DMA_MEM_TO_MEM;
3491 uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
3492 dev_name(dev), i);
3493
3494 vchan_init(&uc->vc, &ud->ddev);
3495 /* Use custom vchan completion handling */
3496 tasklet_init(&uc->vc.task, udma_vchan_complete,
3497 (unsigned long)&uc->vc);
3498 init_completion(&uc->teardown_completed);
3499 }
3500
3501 ret = dma_async_device_register(&ud->ddev);
3502 if (ret) {
3503 dev_err(dev, "failed to register slave DMA engine: %d\n", ret);
3504 return ret;
3505 }
3506
3507 platform_set_drvdata(pdev, ud);
3508
3509 ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud);
3510 if (ret) {
3511 dev_err(dev, "failed to register of_dma controller\n");
3512 dma_async_device_unregister(&ud->ddev);
3513 }
3514
3515 return ret;
3516}
3517
3518static struct platform_driver udma_driver = {
3519 .driver = {
3520 .name = "ti-udma",
3521 .of_match_table = udma_of_match,
3522 .suppress_bind_attrs = true,
3523 },
3524 .probe = udma_probe,
3525};
3526builtin_platform_driver(udma_driver);
3527
3528/* Private interfaces to UDMA */
3529#include "k3-udma-private.c"